Columns: query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 distinct values)

query | document | metadata | negatives | negative_scores | document_score | document_rank
---|---|---|---|---|---|---
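Each row pairs a natural-language query with the matching code snippet (document), 30 non-matching snippets (negatives) with what appear to be descending similarity scores, plus the matched document's own score and rank; the metadata column declares a single (query, document, negatives) triplet objective. As a minimal illustrative sketch (not part of the dataset itself, and assuming each row has already been parsed into a Python dict keyed by the column names above), such a row can be expanded into training triplets:

```python
# Sketch only: expand one parsed row of this table into (query, positive, negative)
# triplets for the "triplet" objective named in the metadata column.
from typing import Dict, Iterable, Iterator, Tuple


def iter_triplets(rows: Iterable[Dict]) -> Iterator[Tuple[str, str, str]]:
    """Yield (query, document, negative) triplets from rows with this schema."""
    for row in rows:
        query = row["query"]          # natural-language description
        document = row["document"]    # the matching code snippet
        negatives = row["negatives"]  # 30 non-matching snippets
        scores = [float(s) for s in row["negative_scores"]]
        # Emit higher-scoring negatives first; they read as the harder negatives.
        for negative, _score in sorted(zip(negatives, scores), key=lambda x: -x[1]):
            yield query, document, negative
```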
Get the URL of the docker builder image for fuzzing the benchmark with fuzzer. | def get_builder_image_url(benchmark, fuzzer, docker_registry):
    return f'{docker_registry}/builders/{fuzzer}/{benchmark}' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_runner_image_url(experiment, benchmark, fuzzer, docker_registry):\n tag = 'latest' if environment.get('LOCAL_EXPERIMENT') else experiment\n return f'{docker_registry}/runners/{fuzzer}/{benchmark}:{tag}'",
"def _to_dockerfile_url(image):\n path = \"/\".join((image.platform, image.release, image.architecture, \"Dockerfile\"))\n return git.get_github_blob_url(path, ref=f\"v{image.version}\")",
"def getBuildbotURL():",
"def _GetBuildBotUrl(builder_host, builder_port):\n if (builder_host == BISECT_BUILDER_HOST and\n builder_port == BISECT_BUILDER_PORT):\n return TRY_SERVER_URL\n else:\n return 'http://%s:%s' % (builder_host, builder_port)",
"def get_buildbot_url():\n return \"http://10.45.4.98:8001/\"",
"def get_fuzz_target(benchmark):\n # Do this because of OSS-Fuzz-on-demand.\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n return benchmark_config.get_config(benchmark).get(\n 'fuzz_target', environment.get('FUZZ_TARGET'))",
"def get_image_url():",
"def query_repo_url_from_buildername(buildername):\n repo_name = query_repo_name_from_buildername(buildername)\n return buildapi.query_repo_url(repo_name)",
"def get_image_name():\n try:\n return os.environ['AIRFLOW_IMAGE']\n except KeyError:\n raise Exception(\"Please provide docker image name to pytest using environment variable AIRFLOW_IMAGE\")",
"def get_image_registry_url(self, image_name):\n c = self._oc_command([\"get\", \"is\", image_name,\n \"--output=jsonpath=\\'{ .status.dockerImageRepository }\\'\"])\n try:\n internal_registry_name = run_cmd(c, return_output=True)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"oc get is failed: %s\" % ex)\n\n logger.info(\"Image registry url: %s\", internal_registry_name)\n\n return internal_registry_name.replace(\"'\", \"\").replace('\"', '')",
"def _dockerfile(self):\n return self.config.get('docker', {}).get('dockerfile', 'Dockerfile')",
"def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")",
"def _get_image(runtime):\n return \"{}:{}\".format(LambdaContainer._IMAGE_REPO_NAME, runtime)",
"def http_service(docker_ip: Any, docker_services: Any) -> Any:\n # `port_for` takes a container port and returns the corresponding host port\n port = docker_services.port_for(\"fdk-baseregistries-publisher\", HOST_PORT)\n url = \"http://{}:{}\".format(docker_ip, port)\n docker_services.wait_until_responsive(\n timeout=30.0, pause=0.1, check=lambda: is_responsive(url)\n )\n return url",
"def create_image_builder_streaming_url(Name=None, Validity=None):\n pass",
"def url(self):\r\n return \"{}/container/{}\".format(BASE_URL, self.unit_locator)",
"def _ensure_image(testkit_path, branch_name, artifacts_path):\n # Construct Docker image name from branch name\n image_name = \"runner:%s\" % branch_name\n image_path = os.path.join(testkit_path, \"runner_image\")\n docker.build_and_tag(image_name, image_path, log_path=artifacts_path)\n\n return image_name",
"def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')",
"def get_base_docker_image(docker_file):\n with open(docker_file) as f:\n from_line = next(\n line for line in f.read().split(\"\\n\") if line.startswith(\"FROM\")\n )\n _from, base_image = from_line.split()\n return base_image",
"def build_nighthawk_benchmark_image_from_source(manager: source_manager.SourceManager) -> None:\n # TODO: Inject the builder object into this method\n builder = nighthawk_builder.NightHawkBuilder(manager)\n builder.build_nighthawk_benchmark_image()",
"def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"",
"def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url",
"def dockerfile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"dockerfile\")",
"def url(self, name):\n return '%s/%s' % (self.container_url, name)",
"def get_rule_container_image_uri(name, region):\n if name is not None and name.startswith(\"DetailedProfilerProcessingJobConfig\"):\n # should have the format like \"123456789012.dkr.ecr.us-west-2.amazonaws.com/detailed-profiler-processing:latest\"\n return image_uris.retrieve(detailed_framework_name, region)\n\n return image_uris.retrieve(framework_name, region)",
"def get_builder(self, request):\n base_url = request.application_url\n return images_view.ViewBuilderV10(base_url)",
"def _build_url(self):\n url = BASE_URL.format(self._host, self._port)\n _LOGGER.debug(\"TOON fetch URL: %s\", url)\n return url",
"def pyfunc_build_image(model_uri, extra_args=None):\n name = uuid.uuid4().hex\n cmd = [\"mlflow\", \"models\", \"build-docker\", \"-m\", model_uri, \"-n\", name]\n if extra_args:\n cmd += extra_args\n p = subprocess.Popen(cmd,)\n assert p.wait() == 0, \"Failed to build docker image to serve model from %s\" % model_uri\n return name",
"def dockerfile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dockerfile\")",
"def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\""
] | [
"0.800936",
"0.68247736",
"0.62919927",
"0.6119331",
"0.60827637",
"0.5836558",
"0.5680232",
"0.5606805",
"0.5593281",
"0.55321765",
"0.5527971",
"0.55201703",
"0.5511697",
"0.5464959",
"0.5450358",
"0.5436414",
"0.5423743",
"0.5383551",
"0.53793126",
"0.5351387",
"0.53473455",
"0.5292844",
"0.5277089",
"0.5235646",
"0.5235194",
"0.52274054",
"0.5220325",
"0.52090615",
"0.51965827",
"0.5178419"
] | 0.8987414 | 0 |
Returns True if |benchmark| is a valid fuzzbench benchmark name. | def validate_name(benchmark):
    if VALID_BENCHMARK_REGEX.match(benchmark) is None:
        logs.error('%s does not conform to %s pattern.', benchmark,
                   VALID_BENCHMARK_REGEX.pattern)
        return False
    return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(benchmark):\n if not validate_name(benchmark):\n return False\n\n if benchmark not in get_all_benchmarks():\n logs.error('%s must have a benchmark.yaml.', benchmark)\n return False\n\n # Validate config file can be parsed.\n try:\n get_fuzz_target(benchmark)\n except yaml.parser.ParserError:\n logs.error('%s must have a valid benchmark.yaml file. Failed to parse.',\n benchmark)\n return False\n except KeyError:\n logs.error('%s\\'s benchmark.yaml does not define \"fuzz_target\".',\n benchmark)\n return False\n\n # Validate type.\n return validate_type(benchmark)",
"def validate_type(benchmark):\n benchmark_type = get_type(benchmark)\n if benchmark_type not in BENCHMARK_TYPE_STRS:\n logs.error('%s has an invalid benchmark type %s, must be one of %s',\n benchmark, benchmark_type, BENCHMARK_TYPE_STRS)\n return False\n return True",
"def _is_valid_keyspace_name(self, keyspace_name):\n if keyspace_name == None or not keyspace_name:\n return False\n return re.match(r\"^[a-z_]*[^-]$\", keyspace_name)",
"def valid_routine_name(routine):\n\treturn re.match('^[a-z_]([a-z0-9_]*)', routine) is not None",
"def validName(configsetname):\n for c in configsetname:\n if not c in string.letters+string.digits+\"$_-\":\n return False\n return configsetname != \"\"",
"def is_dev_name_valid(self):\n return self._name_re.match(self.dev_name) is not None",
"def is_valid(name):\n return bool(name)",
"def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)",
"def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not defined!')",
"def isValidDataTypeName(name: unicode) -> bool:\n ...",
"def verify_name(name):\n try:\n if name.index(' '):\n return False\n except ValueError:\n return True",
"def is_bank_name_valid(name_to_check: str):\n def is_name_short_enough():\n return True if len(name_to_check) <= 12 else False\n\n def is_name_only_letter():\n return True if name_to_check.isalpha() else False\n\n return True if is_name_short_enough() and is_name_only_letter() else False",
"def is_valid_name(self):\n\n if self.whitelist_name == '':\n return True\n\n if len(self.whitelist_name) >= 64:\n LOGGER.debug('invalid name %s; must be less than 64 bytes',\n self.whitelist_name)\n return False\n\n return True",
"def is_builtin_name(name):\r\n if name.startswith('__') and name.endswith('__'):\r\n return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None\r\n return False",
"def _is_fan(compressor: Compressor) -> bool:\n name = compressor.name.lower()\n return 'fan' in name or 'crtf' in name",
"def get_fuzzer_benchmark_key(fuzzer: str, benchmark: str):\n return fuzzer + ' ' + benchmark",
"def _validate_name(name):\r\n\tif HOST_NAME != name and len(name) > 0 and ZOOM_PHRASES[0] not in name and name not in WAITING_ROOM:\r\n\t\treturn True\r\n\treturn False",
"def name_valid(name):\n return name.isalpha()",
"def is_mbid(mbid):\n try:\n mbid = uuid.UUID(mbid)\n good = True\n except ValueError as e:\n good = False\n except AttributeError:\n good = False\n\n return good",
"def verify_name(name):\n if name and not name.isspace(): # if it's not empty/NULL and it's not whitespace\n return True\n else:\n return False",
"def _check_if_valid_dataset_name(dataset_name: str) -> str:\n if not re.match(r\"^[A-Za-z0-9_]+$\", dataset_name):\n raise ExecutionEngineError(\n f\"dataset_name: {dataset_name} is not valid, because it contains non-alphanumeric and _ characters.\"\n f\"Please check your configuration.\"\n )\n\n if len(dataset_name) >= MAX_TABLE_NAME_LENGTH:\n # starting from the end, so that we always get the index and sub_index\n new_dataset_name = dataset_name[-MAX_TABLE_NAME_LENGTH:]\n logger.info(\n f\"dataset_name: '{dataset_name}' was truncated to '{new_dataset_name}' to keep within length limits.\"\n )\n dataset_name = new_dataset_name\n\n while not re.match(r\"^[A-Za-z]+$\", dataset_name[0]):\n dataset_name = dataset_name[1:]\n\n return dataset_name",
"def create_benchmark(self, benchmark):\n self.crd_client.create_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\",\n version=\"v1alpha1\",\n namespace=benchmark[\"metadata\"][\"namespace\"],\n plural=\"benchmarks\",\n body=benchmark,\n )",
"def is_valid_business_name(self):\n return self.business_name.lower() not in INVALID_BUSINESS_NAME",
"def has_name(self):\n return self.unpack_word(0x2) != 0",
"def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False",
"def full_name(self, test_name: str) -> bool:\n if not self._full_test_name_patterns:\n return True\n\n if self._compiled_test_name_pattern is None:\n self._compiled_test_name_pattern = re.compile('|'.join(self._full_test_name_patterns))\n\n return self._compiled_test_name_pattern.match(test_name)",
"def IsValidFacename(*args, **kwargs):\n return _gdi_.FontEnumerator_IsValidFacename(*args, **kwargs)",
"def is_basic_name(name):\n if name is None:\n raise AdasDBError(\"Invalid name '%s'.\" % name)\n return name.find(PATH_SEPARATOR)",
"def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False",
"def get_fuzz_target(benchmark):\n # Do this because of OSS-Fuzz-on-demand.\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n return benchmark_config.get_config(benchmark).get(\n 'fuzz_target', environment.get('FUZZ_TARGET'))"
] | [
"0.75245595",
"0.70732296",
"0.5810576",
"0.5674972",
"0.5586452",
"0.55509335",
"0.55345166",
"0.5492754",
"0.54216146",
"0.5396599",
"0.533939",
"0.52890986",
"0.52809453",
"0.5276064",
"0.5262049",
"0.5236118",
"0.5226639",
"0.51745903",
"0.51519513",
"0.51472944",
"0.50536746",
"0.50375664",
"0.5028737",
"0.5014385",
"0.49864894",
"0.49848303",
"0.49750835",
"0.49659556",
"0.49601093",
"0.49598923"
] | 0.816597 | 0 |
Returns True if |benchmark| has a valid type. | def validate_type(benchmark):
    benchmark_type = get_type(benchmark)
    if benchmark_type not in BENCHMARK_TYPE_STRS:
        logs.error('%s has an invalid benchmark type %s, must be one of %s',
                   benchmark, benchmark_type, BENCHMARK_TYPE_STRS)
        return False
    return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(benchmark):\n if not validate_name(benchmark):\n return False\n\n if benchmark not in get_all_benchmarks():\n logs.error('%s must have a benchmark.yaml.', benchmark)\n return False\n\n # Validate config file can be parsed.\n try:\n get_fuzz_target(benchmark)\n except yaml.parser.ParserError:\n logs.error('%s must have a valid benchmark.yaml file. Failed to parse.',\n benchmark)\n return False\n except KeyError:\n logs.error('%s\\'s benchmark.yaml does not define \"fuzz_target\".',\n benchmark)\n return False\n\n # Validate type.\n return validate_type(benchmark)",
"def is_valid_type(type):\n return type in type_to_adapter",
"def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types",
"def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True",
"def check_type(self):\n return True",
"def validate_name(benchmark):\n if VALID_BENCHMARK_REGEX.match(benchmark) is None:\n logs.error('%s does not conform to %s pattern.', benchmark,\n VALID_BENCHMARK_REGEX.pattern)\n return False\n return True",
"def is_type(self, type_name):\n\n return type_name in self._symtab",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)",
"def test_has_exactly_type():\r\n\r\n return has_exactly_type(1, int) and not has_exactly_type(True, int) and has_exactly_type(True, bool)",
"def _is_valid_type(_type: Type[Any]) -> bool:\n\n if _type in _TYPE_MAP:\n return True\n\n if not inspect.isclass(_type):\n return False\n\n return issubclass(_type, Table)",
"def get_type(benchmark):\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n default_value = os.getenv('EXPERIMENT_TYPE', BenchmarkType.CODE.value)\n return benchmark_config.get_config(benchmark).get('type', default_value)",
"def is_supported_type(self) -> bool:\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS",
"def check_frame_type(self, ftype, fitstbl, exprng=None):\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n if ftype in ['science', 'standard']:\n return good_exp & (np.logical_not(np.char.startswith(np.char.lower(fitstbl['target']), 'arclamp'))) & \\\n (np.char.lower(fitstbl['target']) != 'spectralflat') & \\\n (np.char.lower(fitstbl['target']) != 'bias')\n if ftype in ['arc', 'tilt']:\n return good_exp & (np.char.startswith(np.char.lower(fitstbl['target']), 'arclamp'))\n if ftype in ['pixelflat', 'trace', 'illumflat']:\n return good_exp & (np.char.lower(fitstbl['target']) == 'spectralflat')\n if ftype == 'bias':\n return good_exp & (np.char.lower(fitstbl['target']) == 'bias')\n\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)",
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def CheckType(self, *args, **kwargs):\n pass",
"def is_basic_type(self, objtype):\n if not hasattr(objtype, '_type_'):\n # could be python types\n return objtype in [int, long, float, bool]\n return self.is_basic_ctype(objtype)",
"def is_result_appropriate(self, benchmark_result):\n return True",
"def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type",
"def isquantized(val):\n # try: 1.1 * val ... except: might be more general, but could be expensive.\n retval = True\n if isinstance(val, (float, complex)):\n retval = False\n elif hasattr(val, 'issubdtype'):\n if numpy.issubdtype(val.dtype, float) or numpy.issubdtype(val.dtype, complex):\n retval = False\n return retval",
"def check_frame_type(self, ftype, fitstbl, exprng=None):\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n if ftype in ['science', 'standard']:\n return good_exp & (fitstbl['idname'] == 'OBJECT')\n if ftype == 'bias':\n return good_exp & (fitstbl['idname'] == 'BIAS')\n if ftype in ['pixelflat', 'trace', 'illumflat']:\n return good_exp & (fitstbl['idname'] == 'FLAT,LAMP')\n if ftype in ['pinhole', 'dark']:\n # Don't type pinhole or dark frames\n return np.zeros(len(fitstbl), dtype=bool)\n if ftype in ['arc','tilt']:\n return good_exp & (fitstbl['idname'] == 'WAVE,LAMP')\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)",
"def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'",
"def checkType(self, value):\n pass",
"def check_frame_type(self, ftype, fitstbl, exprng=None):\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n # TODO: Allow for 'sky' frame type, for now include sky in\n # 'science' category\n if ftype == 'science':\n return good_exp & (fitstbl['idname'] == 'Object')\n if ftype == 'standard':\n return good_exp & (fitstbl['idname'] == 'Object')\n if ftype == 'bias':\n return good_exp & (fitstbl['idname'] == 'Bias')\n if ftype == 'dark':\n return good_exp & (fitstbl['idname'] == 'Dark')\n if ftype in ['pixelflat', 'trace']:\n # Flats and trace frames are typed together\n return good_exp & (fitstbl['idname'] == 'IntFlat')\n if ftype in ['arc', 'tilt']:\n # Arc and tilt frames are typed together\n return good_exp & (fitstbl['idname'] == 'Line')\n\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)",
"def is_valid_silktype(type_name, permit_array=False):\n if not type_name.replace(\"_\", \"x\").isalnum():\n return False\n\n if not type_name[0].isupper():\n return False\n\n if len(type_name) > 1 and type_name == type_name.upper():\n return False\n\n if permit_array:\n array_depth = 0\n while type_name.endswith(\"Array\"):\n type_name = type_name[:-len(\"Array\")]\n array_depth += 1\n\n if array_depth > max_array_depth:\n return False\n\n elif type_name.endswith(\"Array\"):\n return False\n\n if type_name in reserved_types:\n return False\n\n for ending in reserved_endings:\n if type_name.endswith(ending):\n return False\n return True",
"def check_frame_type(self, ftype, fitstbl, exprng=None):\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n if ftype in ['science','standard']:\n return good_exp & (fitstbl['target'] != 'ArcLamp_Xe') \\\n & (fitstbl['target'] != 'ArcLamp_HgAr') \\\n & (fitstbl['target'] != 'ArcLamp_Ne') \\\n & (fitstbl['target'] != 'SpectralFlat') \\\n & (fitstbl['target'] != 'BIAS')\n if ftype in ['arc', 'tilt']:\n return good_exp & ((fitstbl['target'] == 'ArcLamp_Xe') \\\n | (fitstbl['target'] == 'ArcLamp_HgAr') \\\n | (fitstbl['target'] == 'ArcLamp_Ne'))\n if ftype in ['pixelflat', 'trace', 'illumflat']:\n return good_exp & (fitstbl['target'] == 'SpectralFlat')\n if ftype == 'bias':\n return good_exp & (fitstbl['target'] == 'BIAS')\n\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)",
"def is_type(self, typ):\n return typ == self.__class__.__name__",
"def _is_typevar(typeval: Type) -> bool:\n return isinstance(typeval, TypeVar) # type: ignore",
"def check_frame_type(self, ftype, fitstbl, exprng=None):\n good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n if ftype in ['pinhole', 'bias']:\n # No pinhole or bias frames\n return np.zeros(len(fitstbl), dtype=bool)\n if ftype in ['pixelflat', 'trace', 'illumflat']:\n return good_exp & (fitstbl['idname'] == 'flat')\n if ftype == 'standard':\n return good_exp & (fitstbl['idname'] == 'object')\n if ftype == 'science':\n return good_exp & (fitstbl['idname'] == 'object')\n if ftype in ['arc', 'tilt']:\n return good_exp & (fitstbl['idname'] == 'object')\n if ftype == 'dark':\n return good_exp & (fitstbl['idname'] == 'dark')\n msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))\n return np.zeros(len(fitstbl), dtype=bool)",
"def test_expected_type(val, exp_type):\n\n if not isinstance(val, exp_type):\n return False"
] | [
"0.6986698",
"0.6440675",
"0.6303851",
"0.62154716",
"0.6090882",
"0.608178",
"0.6064316",
"0.6046675",
"0.5970051",
"0.5952641",
"0.58843154",
"0.58391774",
"0.5755268",
"0.57513607",
"0.5732901",
"0.57144207",
"0.56996477",
"0.56742626",
"0.55769104",
"0.5570763",
"0.55672246",
"0.5563812",
"0.5538002",
"0.5537267",
"0.55359256",
"0.55262315",
"0.55261034",
"0.549746",
"0.54852664",
"0.54820013"
] | 0.837678 | 0 |
Returns True if |benchmark| is a valid fuzzbench benchmark. | def validate(benchmark):
    if not validate_name(benchmark):
        return False

    if benchmark not in get_all_benchmarks():
        logs.error('%s must have a benchmark.yaml.', benchmark)
        return False

    # Validate config file can be parsed.
    try:
        get_fuzz_target(benchmark)
    except yaml.parser.ParserError:
        logs.error('%s must have a valid benchmark.yaml file. Failed to parse.',
                   benchmark)
        return False
    except KeyError:
        logs.error('%s\'s benchmark.yaml does not define "fuzz_target".',
                   benchmark)
        return False

    # Validate type.
    return validate_type(benchmark) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_type(benchmark):\n benchmark_type = get_type(benchmark)\n if benchmark_type not in BENCHMARK_TYPE_STRS:\n logs.error('%s has an invalid benchmark type %s, must be one of %s',\n benchmark, benchmark_type, BENCHMARK_TYPE_STRS)\n return False\n return True",
"def validate_name(benchmark):\n if VALID_BENCHMARK_REGEX.match(benchmark) is None:\n logs.error('%s does not conform to %s pattern.', benchmark,\n VALID_BENCHMARK_REGEX.pattern)\n return False\n return True",
"def is_result_appropriate(self, benchmark_result):\n return True",
"def _is_fan(compressor: Compressor) -> bool:\n name = compressor.name.lower()\n return 'fan' in name or 'crtf' in name",
"def test_not_a_valid_fuzzer(self):\n self.assertFalse(cifuzz.check_fuzzer_build(TEST_FILES_PATH))",
"def check_benchmark_result(result, expectation):\n for storage_cfg, caches in result['cache_data'].items():\n for cache, percent_recorded in caches.items():\n if ((percent_recorded['min'] < expectation['min'])\n or (percent_recorded['avg'] < expectation['avg'])\n or (percent_recorded['max'] < expectation['max'])):\n return False\n return True",
"def get_benchmark(self, benchmark):\n\t\tif not isinstance(benchmark, str) and not callable(benchmark): return benchmark\n\t\telif benchmark in self.classes:\treturn self.classes[benchmark]()\n\t\traise TypeError('Passed benchmark is not defined!')",
"def validate(self, benchmarks):\n class_code = self.setup_src\n instance_creation = '\\ninstance = {}'.format(self.stmt)\n for i, benchmark in enumerate(benchmarks):\n if not benchmark.result_validation:\n break\n\n validation_code = class_code + instance_creation + '\\nvalidation_result = ' + benchmark.stmt\n validation_scope = {}\n exec(validation_code, validation_scope)\n # Store the result in the first function in the group.\n if i == 0:\n compare_against_function = benchmarks[0].callable.__name__\n compare_against_result = validation_scope['validation_result']\n logging.info('PyPerform: Validating group \"{b.group}\" against method '\n '\"{b.classname}.{b.callable.__name__}\"'.format(b=benchmarks[0]))\n else:\n if compare_against_result == validation_scope['validation_result']:\n logging.info('PyPerform: Validating {b.classname}.{b.callable.__name__}......PASSED!'\n .format(b=benchmark))\n else:\n error = 'Results of functions {0} and {1} are not equivalent.\\n{0}:\\t {2}\\n{1}:\\t{3}'\n raise ValidationError(error.format(compare_against_function, benchmark.callable.__name__,\n compare_against_result, validation_scope['validation_result']))",
"def get_fuzz_target(benchmark):\n # Do this because of OSS-Fuzz-on-demand.\n # TODO(metzman): Use classes to mock a benchmark config for\n # OSS_FUZZ_ON_DEMAND.\n return benchmark_config.get_config(benchmark).get(\n 'fuzz_target', environment.get('FUZZ_TARGET'))",
"def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True",
"def is_mbid(mbid):\n try:\n mbid = uuid.UUID(mbid)\n good = True\n except ValueError as e:\n good = False\n except AttributeError:\n good = False\n\n return good",
"def _is_valid_adapter(self, adapter_name):\n\n valid_adapters = self.valid_adapters\n\n result = False\n\n if adapter_name in valid_adapters:\n\n result = True\n\n return result",
"def test_BenchmarkSuite_integration_test(\n benchmark_suite: typing.Callable, tempdir: pathlib.Path\n):\n with benchmark_suite() as bs:\n bs.ForceOpenCLEnvironment(cldrive_env.OclgrindOpenCLEnvironment())\n observer = MockBenchmarkObserver(stop_after=1)\n\n # `stop_after` raises BenchmarkInterrupt.\n try:\n bs.Run([observer])\n assert False\n except gpgpu.BenchmarkInterrupt:\n pass\n\n assert len(observer.logs) == 1\n assert observer.logs[0].benchmark_name in bs.benchmarks",
"def __isFastener(f):\n\n if type(f) != Fastener:\n raise TypeError(\"FastnerGroups may contain only Fasteners\")\n else:\n return True",
"def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, ate.BenchmarkATE) or\n not isinstance(benchmark2, ate.BenchmarkATE)):\n self.fail('object was not a BenchmarkATE')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1.offset, benchmark2.offset)\n self.assertEqual(benchmark1.max_difference, benchmark2.max_difference)\n self.assertEqual(benchmark1.scale, benchmark2.scale)",
"def is_valid(passwd: str) -> bool:\n return (\n re.search(r'abc|bcd|cde|def|efg|fgh|ghi|hij|jkl|klm|lmn|mno|nop|opq|pqr|qrs|rst|stu|tuv|uvw|vwx|wxy|xyz', passwd) is not None and\n all([c not in passwd for c in 'iol']) and\n re.search(r'([a-z])\\1.*([a-z])\\2', passwd) is not None\n )",
"def create_benchmark(self, benchmark):\n self.crd_client.create_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\",\n version=\"v1alpha1\",\n namespace=benchmark[\"metadata\"][\"namespace\"],\n plural=\"benchmarks\",\n body=benchmark,\n )",
"def is_valid(self, user_specific_config: Any, factor: str) -> bool:",
"def check(self, password):\n\n if len(password) < self.min_length:\n return False\n\n digits = len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n return False\n\n return True",
"def is_cpp(benchmark):\n return get_language(benchmark) == 'c++'",
"def _quick_and_dirty_glyph_is_empty(font, glyph_name):\n if 'glyf' in font:\n glyph = font['glyf'][glyph_name]\n if not glyph.isComposite():\n if glyph.numberOfContours == 0:\n return True\n return False\n elif 'CFF2' in font:\n top_dict = font['CFF2'].cff.topDictIndex[0]\n else:\n top_dict = font['CFF '].cff.topDictIndex[0]\n char_strings = top_dict.CharStrings\n char_string = char_strings[glyph_name]\n if len(char_string.bytecode) <= 1:\n return True\n return False",
"def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, detection_comp.FeatureDetectionComparison) or\n not isinstance(benchmark2, detection_comp.FeatureDetectionComparison)):\n self.fail('object was not a FeatureDetectionComparison')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1._acceptable_radius, benchmark2._acceptable_radius)",
"def is_valid(name):\n return bool(name)",
"def is_valid(self):\n # check if cpf isn't in invalid_cpfs list\n if self.cpf in self.invalid_cpfs: return False\n\n # get first nine digits to calculate two verification digits\n cpf = self.cpf[:9]\n # while cpf isn't complete (this runs two loops)\n while len(cpf) < 11:\n\n # run trought numbers multiplying number (v) by weight (len(cpf)+1-i)\n # and then get sum rest of division by 11 as integer\n r = int(sum([math.floor((len(cpf)+1-i_v[0])*i_v[1]) for i_v in enumerate(cpf)]) % 11)\n\n # if digit is smaller than 2, turns 0\n if r < 2:\n f = 0\n else:\n f = 11 -r\n\n # append to cpf list\n cpf.append(f)\n\n # if created number is same as original number, cpf is valid\n return bool(cpf == self.cpf)",
"def is_system_ready_for_benchmarking():\n\n # check if scaling_governor is set to 'performance' for all cpu cores\n cpu_governors = glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')\n if not cpu_governors:\n logger.error('no scaling_governor found. Do you run on a Linux System?')\n return False\n for governor in sorted(cpu_governors):\n with open(governor, 'r') as f:\n line = f.read().splitlines()[0]\n logger.debug('%s is set to \\\"%s\\\"', governor, line)\n if line != 'performance':\n logger.warning('please set all scaling_governor to \\\"performance\\\" (using \"sudo ./ondemand.sh start\")')\n return False\n\n return True",
"def assert_passes_fuzz(self, feature_spec, tries=1000):\n feature_spec = make_feature(feature_spec)\n for i in range(tries):\n data_point = generate.generate(feature_spec.input_schema)\n try:\n feature = feature_spec(data_point)\n except Exception as e:\n self.fail(\"Error evaluating; input=%r error=%r\" %\n (data_point, e))\n try:\n feature_spec.output_schema.validate(feature)\n except schema.SchemaError:\n self.fail(\"Invalid output schema; input=%r output=%r\" %\n (data_point, feature))",
"def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)",
"def CheckPrerequisites(benchmark_config):\n dpb_service_type = benchmark_config.dpb_service.service_type\n if dpb_service_type not in SUPPORTED_DPB_BACKENDS:\n raise errors.Config.InvalidValue('Invalid backend for distcp. Not in:{}'.\n format(str(SUPPORTED_DPB_BACKENDS)))",
"def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, detection_comp.FeatureDetectionComparisonResult) or\n not isinstance(benchmark2, detection_comp.FeatureDetectionComparisonResult)):\n self.fail('object was not a FeatureDetectionComparisonResult')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1.success, benchmark2.success)\n self.assertEqual(benchmark1.benchmark, benchmark2.benchmark)\n self.assertEqual(benchmark1.trial_result, benchmark2.trial_result)\n self.assertEqual(benchmark1.reference_trial_result, benchmark2.reference_trial_result)\n self.assertEqual(benchmark1._feature_changes, benchmark2._feature_changes)\n self.assertEqual(benchmark1._changes_id, benchmark2._changes_id)",
"def fuzz():\n if FUZZ:\n time.sleep(random.random())"
] | [
"0.69998956",
"0.67811495",
"0.5615518",
"0.5273485",
"0.5138776",
"0.5041119",
"0.48480907",
"0.48040372",
"0.47753853",
"0.47583094",
"0.4682528",
"0.4670917",
"0.466327",
"0.4620052",
"0.45891234",
"0.45834735",
"0.45678422",
"0.4562975",
"0.45465788",
"0.45328742",
"0.45173",
"0.45090833",
"0.44996414",
"0.44785774",
"0.4448101",
"0.44395432",
"0.4427622",
"0.44208667",
"0.44115433",
"0.44073173"
] | 0.7685098 | 0 |
Returns the list of all benchmarks. | def get_all_benchmarks():
    all_benchmarks = []
    for benchmark in os.listdir(BENCHMARKS_DIR):
        benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)
        if os.path.isfile(os.path.join(benchmark_path, 'benchmark.yaml')):
            all_benchmarks.append(benchmark)
    return sorted(all_benchmarks) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_all(self):\n runs = []\n for run in self.benchmarks:\n run.start()\n run.wait()\n runs.append(run.metadata)\n return runs",
"def list(self, classes):\n\n def add(benchmarks, parts, flags, exclude):\n if (\n flags[\"language\"] != \"C++\"\n and flags[\"language\"] != \"Java\"\n and \"--drop-caches=true\" not in parts\n ):\n parts.append(\"--drop-caches=true\")\n command = \" \".join(parts)\n if command not in exclude:\n benchmarks.append({\"command\": command, \"flags\": flags})\n\n benchmarks = []\n for name, benchmark in classes.items():\n if name.startswith(\"example\"):\n continue\n\n instance, parts = benchmark(), [name]\n\n exclude = getattr(benchmark, \"exclude\", [])\n if \"source\" in getattr(benchmark, \"arguments\", []):\n parts.append(\"ALL\")\n\n iterations = getattr(instance, \"iterations\", 3)\n parts.append(f\"--iterations={iterations}\")\n\n if instance.cases:\n parts.append(\"--all=true\")\n\n flags = getattr(instance, \"flags\", {})\n\n if getattr(instance, \"r_only\", False):\n flags[\"language\"] = \"R\"\n add(benchmarks, parts, flags, exclude)\n else:\n if \"language\" not in flags:\n flags[\"language\"] = \"Python\"\n add(benchmarks, parts, flags, exclude)\n\n if hasattr(instance, \"r_name\"):\n flags_ = flags.copy()\n flags_[\"language\"] = \"R\"\n parts.append(\"--language=R\")\n add(benchmarks, parts, flags_, exclude)\n\n return sorted(benchmarks, key=lambda k: k[\"command\"])",
"def get(self, request, format=None):\n benchmarkmodels = BenchmarkModel.objects.all()\n serializer = BenchmarkModelListSerializer(benchmarkmodels, many=True)\n return Response(serializer.data)",
"def test_list_benchmarks(client):\n # The benchmark listing contains one element (independently of whether the\n # user is logged in or not).\n r = client.get(config.API_PATH() + '/workflows')\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1\n # Create user and the request header that contains the API key for the\n # logged in user.\n _, token = create_user(client, '0000')\n headers = {HEADER_TOKEN: token}\n r = client.get(config.API_PATH() + '/workflows', headers=headers)\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1",
"def get_coverage_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.CODE.value\n ]",
"def delete_all_benchmarks(self, namespace=\"benchmark-operator\"):\n all_benchmarks = self.crd_client.list_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\", version=\"v1alpha1\", namespace=namespace, plural=\"benchmarks\"\n )\n\n _ = [\n self.delete_benchmark(benchmark[\"metadata\"][\"name\"], namespace)\n for benchmark in all_benchmarks.get(\"items\", [])\n ]",
"def list_runtimes(self, workbench):\n pass",
"def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())",
"def _parse_suite(\n self, results: dict, extra_tags: dict = None\n ) -> List[BenchmarkResult]:\n # all results share a batch id\n batch_id = uuid.uuid4().hex\n\n parsed_results = []\n for result in results[\"benchmarks\"]:\n result_parsed = self._parse_benchmark(\n result=GoogleBenchmark(**result),\n batch_id=batch_id,\n extra_tags=extra_tags,\n )\n parsed_results.append(result_parsed)\n\n return parsed_results",
"def get_benchmark_requirements(cls):\n pass",
"def __init__(self, conf, benchmarks, regex=None):\n self._conf = conf\n self._benchmark_dir = conf.benchmark_dir\n\n if not regex:\n regex = []\n if isinstance(regex, str):\n regex = [regex]\n\n self._all_benchmarks = {}\n self._benchmark_selection = {}\n for benchmark in benchmarks:\n self._all_benchmarks[benchmark['name']] = benchmark\n if benchmark['params']:\n self._benchmark_selection[benchmark['name']] = []\n for idx, param_set in enumerate(\n itertools.product(*benchmark['params'])):\n name = f\"{benchmark['name']}({', '.join(param_set)})\"\n if not regex or any(re.search(reg, name) for reg in regex):\n self[benchmark['name']] = benchmark\n self._benchmark_selection[benchmark['name']].append(idx)\n else:\n self._benchmark_selection[benchmark['name']] = None\n if not regex or any(re.search(reg, benchmark['name']) for reg in regex):\n self[benchmark['name']] = benchmark",
"def get_all_reporters():\r\n for ep in iter_entry_points('attest.reporters'):\r\n yield ep.name",
"def get_testbench_specs(self, tb_type: str) -> Dict[str, Any]:\n return self._specs['testbenches'][tb_type]",
"def get_methods(cls):\n # get all the methods that have the _benchmark_this flag\n for method in (getattr(cls, m) for m in dir(cls)):\n if hasattr(method, \"_benchmark_this\"):\n yield method",
"def get_list(cls, suite_model) -> list:\n suites = []\n for s in cls.SUITES:\n s[\"tests\"] = cls._get_tests(s)\n s[\"approxRunTime\"] = cls._get_average_run_time(suite_model)\n suites.append(s)\n return suites",
"def test_get_benchmark_methods_filter(self):\n config = mock.Mock()\n config.workspace = 'workspace'\n config.benchmark_method_patterns = ['new_foo.BenchmarkClass.filter:bench.*']\n benchmark_runner = benchmark.BenchmarkRunner(config)\n\n mock_benchmark_class = mock.Mock()\n mock_benchmark_class.benchmark_method_1 = 'foo'\n\n mock_module = mock.Mock()\n sys.modules['new_foo'] = mock_module\n mock_module.BenchmarkClass.return_value = mock_benchmark_class\n\n methods = benchmark_runner._get_benchmark_methods()\n\n self.assertEqual(1, len(methods))\n self.assertEqual('new_foo.BenchmarkClass.benchmark_method_1', methods[0])",
"def get_bug_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.BUG.value\n ]",
"def get_benchmark(client):\n r = client.get(config.API_PATH() + '/benchmarks')\n benchmarks = json.loads(r.data)\n return benchmarks['benchmarks'][0]['id']",
"def test_get_benchmark_methods_exact_match(self):\n config = mock.Mock()\n config.workspace = 'workspace'\n config.benchmark_method_patterns = [\n 'new_foo.BenchmarkClass.benchmark_method_1',\n 'new_foo.BenchmarkClass.benchmark_method_2']\n benchmark_runner = benchmark.BenchmarkRunner(config)\n\n methods = benchmark_runner._get_benchmark_methods()\n self.assertEqual(['new_foo.BenchmarkClass.benchmark_method_1',\n 'new_foo.BenchmarkClass.benchmark_method_2'], methods)",
"def _generate_benchmark_variants(benchmark_spec):\n variants = []\n # Cold start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (cold start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args',\n []) + _COLD_START_SHELL_ARGS})\n # Warm start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (warm start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args', [])})\n return variants",
"def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()",
"def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret",
"def measure_all(backend) -> Schedule:\n # backend is V2.\n if isinstance(backend, BackendV2):\n qubits = list(range(backend.num_qubits))\n else:\n qubits = list(range(backend.configuration().n_qubits))\n return measure(qubits=qubits, backend=backend)",
"def generate_figures():\r\n # create results directory if necessary\r\n try:\r\n makedirs(\"results\")\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n \r\n for b in benchmarks:\r\n generate_figure(model[b], b)",
"def register_benchmarks(directory=None):\n dirs = places_to_look() if directory is None else [directory]\n for directory in dirs:\n with os.scandir(directory) as scan:\n for entry in scan:\n filename = entry.name\n if (\n filename.startswith(\".\")\n or not entry.is_file()\n or not filename.endswith(\".py\")\n ):\n continue\n if (\n filename.startswith(\"benchmark\")\n or filename.endswith(\"benchmark.py\")\n or filename.endswith(\"benchmarks.py\")\n ):\n import_path(f\"{directory}/{filename}\")",
"def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))",
"def suite_list(self):\n return ','.join([s.__unicode__() for s in self.suites.all()])",
"def suite_list(self):\n return ','.join([s.__unicode__() for s in self.suites.all()])",
"def ballot_get_all_contests():\r\n all_contests = []\r\n result = ballot_list_contests(limit=100000000).get('result')\r\n if result:\r\n contests = batch('ballot_get_contest_by_id', [[r] for r in result]).get('result')\r\n if contests:\r\n all_contests = contests\r\n\r\n return all_contests",
"def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:\n discovery_duration = benchmark_spec.data_discovery_service.DiscoverData()\n return [\n sample.Sample('data_discovery_duration', discovery_duration, 'seconds',\n benchmark_spec.data_discovery_service.GetMetadata())]"
] | [
"0.71444166",
"0.6889727",
"0.64351124",
"0.6397667",
"0.6397363",
"0.62924254",
"0.6077235",
"0.59082776",
"0.58880574",
"0.57301986",
"0.56475395",
"0.5644938",
"0.5587757",
"0.55655915",
"0.5550465",
"0.5548563",
"0.5522972",
"0.5509917",
"0.5496706",
"0.54954356",
"0.54953796",
"0.5484505",
"0.5480505",
"0.545044",
"0.5436377",
"0.54351074",
"0.54257315",
"0.54257315",
"0.5412383",
"0.5411625"
] | 0.79568917 | 0 |
Returns the list of all coverage benchmarks. | def get_coverage_benchmarks():
    return [
        benchmark for benchmark in get_all_benchmarks()
        if get_type(benchmark) == BenchmarkType.CODE.value
    ] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_benchmarks():\n all_benchmarks = []\n for benchmark in os.listdir(BENCHMARKS_DIR):\n benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'benchmark.yaml')):\n all_benchmarks.append(benchmark)\n return sorted(all_benchmarks)",
"def run_all(self):\n runs = []\n for run in self.benchmarks:\n run.start()\n run.wait()\n runs.append(run.metadata)\n return runs",
"def list(self, classes):\n\n def add(benchmarks, parts, flags, exclude):\n if (\n flags[\"language\"] != \"C++\"\n and flags[\"language\"] != \"Java\"\n and \"--drop-caches=true\" not in parts\n ):\n parts.append(\"--drop-caches=true\")\n command = \" \".join(parts)\n if command not in exclude:\n benchmarks.append({\"command\": command, \"flags\": flags})\n\n benchmarks = []\n for name, benchmark in classes.items():\n if name.startswith(\"example\"):\n continue\n\n instance, parts = benchmark(), [name]\n\n exclude = getattr(benchmark, \"exclude\", [])\n if \"source\" in getattr(benchmark, \"arguments\", []):\n parts.append(\"ALL\")\n\n iterations = getattr(instance, \"iterations\", 3)\n parts.append(f\"--iterations={iterations}\")\n\n if instance.cases:\n parts.append(\"--all=true\")\n\n flags = getattr(instance, \"flags\", {})\n\n if getattr(instance, \"r_only\", False):\n flags[\"language\"] = \"R\"\n add(benchmarks, parts, flags, exclude)\n else:\n if \"language\" not in flags:\n flags[\"language\"] = \"Python\"\n add(benchmarks, parts, flags, exclude)\n\n if hasattr(instance, \"r_name\"):\n flags_ = flags.copy()\n flags_[\"language\"] = \"R\"\n parts.append(\"--language=R\")\n add(benchmarks, parts, flags_, exclude)\n\n return sorted(benchmarks, key=lambda k: k[\"command\"])",
"def coverage_files() -> Iterable[str]:\n packs_path = CONTENT_PATH / \"Packs\"\n for cov_path in packs_path.glob(\"*/Integrations/*/.coverage\"):\n yield str(cov_path)\n for cov_path in packs_path.glob(\"*/Scripts/*/.coverage\"):\n yield str(cov_path)",
"def benchmark(self):\n nsites = []\n for m in self.methods:\n for name, structure in self.test_structures.items():\n cns = []\n if self.unique_sites:\n es = SpacegroupAnalyzer(structure).get_symmetrized_structure().equivalent_sites\n sites = [structure.index(x[0]) for x in es]\n else:\n sites = range(len(structure))\n\n for key, val in self.hi.items():\n if name == key:\n for j in sites:\n if isinstance(m, NearNeighbors):\n tmpcn = m.get_cn_dict(structure, j, self.use_weights)\n else:\n tmpcn = m.compute(structure, j)\n if tmpcn == \"null\":\n continue\n if self.nround:\n self._roundcns(tmpcn, self.nround)\n cns.append((structure[j].species_string, tmpcn))\n if self.cation_anion:\n for mat, cat in self.cations.items():\n if (name == mat) and cat:\n cns = self._popel(cns, cat)\n elif self.anion_cation:\n for mat, an in self.anions.items():\n if name == mat:\n cns = self._popel(cns, an)\n m._cns[name] = cns\n nsites.append(len(cns))\n self.nsites = max(nsites)",
"def tests_generator(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n cmd_list = [\n (\"Initial Create/Compile/Read Compiled Tree\", \"{0} -D {1} -i 10 --makej -s {2}\"),\n ]\n\n tests = []\n for idx, (desc, cmd) in enumerate(cmd_list):\n test_name = \"compile_bench_{0}_{1}\".format(idx + 1, to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=cmd.format(cb_bin, self.test_path, bin_path))\n tests.append(test)\n return tests",
"def get_bug_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.BUG.value\n ]",
"def test_multiple_coverages(self):\n\n params_82 = {\n 'ReQuEsT': \"DescribeCoverage\",\n 'SeRvIcE': \"WCS\",\n \"BOGUS\": \"SSS\",\n 'Version': \"1.0.0\",\n \"COVERAGE\": \",\".join(self.names)\n }\n response = self.query_server(params_82)\n soup = BeautifulSoup(response.text, 'xml')\n self.assertTrue(\n len(soup.find_all('CoverageOffering')) == len(self.names),\n msg=\"If multiple valid coverages are submitted with a DescribeCoverage request, all requested coverages should be returned.\"\n )\n for elem in soup.find_all('CoverageOffering'):\n self.assertTrue(\n elem.find('name').text in self.names,\n msg=\"All requested coverages should be returned in a DescribeCoverage request.\")",
"def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())",
"def cov():\n cov = coverage.coverage(branch=True, include='project/*')\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()",
"def RunUnitTests():\n if FLAGS.test_targets:\n tests = FLAGS.test_targets\n else:\n tests = shell_interfaces.GetStdout(\n 'bazel query kind(\"cc_test\", ...)').split()\n\n # Run coverage, joining all data into one file.\n subprocess.check_call(['bazel', 'coverage', '--instrument_test_targets',\n '--experimental_cc_coverage',\n '--combined_report=lcov',\n ('--coverage_report_generator=@bazel_tools//tools/tes'\n 't/CoverageOutputGenerator/java/com/google/devtools/'\n 'coverageoutputgenerator:Main')] + tests)",
"def _disc_benchmarks(cls, conf, repo, environments, commit_hashes, check):\n root = conf.benchmark_dir\n\n cls.check_tree(root)\n\n if len(environments) == 0:\n raise util.UserError(\"No available environments\")\n\n # Try several different commits:\n #\n # - First of commit_hashes provided\n # - Tips of branches from configuration file\n # - Rest of the commit_hashes\n #\n\n def iter_hashes():\n for h in commit_hashes[:1]:\n yield h\n for branch in conf.branches:\n try:\n yield repo.get_hash_from_name(branch)\n except NoSuchNameError:\n continue\n for h in commit_hashes[1:]:\n yield h\n\n def iter_unique(iter):\n seen = set()\n for item in iter:\n if item not in seen:\n seen.add(item)\n yield item\n\n try_hashes = iter_unique(iter_hashes())\n\n log.info(\"Discovering benchmarks\")\n with log.indent():\n last_err = None\n for env, commit_hash in itertools.product(environments, try_hashes):\n env.create()\n\n if last_err is not None:\n log.warning(\"Failed: trying different commit/environment\")\n\n result_dir = tempfile.mkdtemp()\n try:\n env.install_project(conf, repo, commit_hash)\n\n env_vars = dict(os.environ)\n env_vars.update(env.env_vars)\n\n result_file = os.path.join(result_dir, 'result.json')\n env.run(\n [runner.BENCHMARK_RUN_SCRIPT, 'discover',\n os.path.abspath(root),\n os.path.abspath(result_file)],\n cwd=result_dir,\n env=env_vars,\n dots=False)\n\n try:\n with open(result_file, 'r') as fp:\n benchmarks = json.load(fp)\n except (IOError, ValueError):\n log.error(\"Invalid discovery output\")\n raise util.UserError()\n\n break\n except (util.UserError, util.ProcessError) as err:\n last_err = err\n continue\n except KeyboardInterrupt:\n raise util.UserError(\"Interrupted.\")\n finally:\n util.long_path_rmtree(result_dir)\n else:\n raise util.UserError(\"Failed to build the project and import the benchmark suite.\")\n\n if check:\n log.info(\"Checking benchmarks\")\n with log.indent():\n result_dir = tempfile.mkdtemp()\n try:\n out, err, retcode = env.run(\n [runner.BENCHMARK_RUN_SCRIPT, 'check',\n os.path.abspath(root)],\n cwd=result_dir,\n dots=False,\n env=env_vars,\n valid_return_codes=None,\n return_stderr=True,\n redirect_stderr=True)\n finally:\n util.long_path_rmtree(result_dir)\n\n out = out.strip()\n if retcode == 0:\n if out:\n log.info(out)\n log.info(\"No problems found.\")\n else:\n if out:\n log.error(out)\n raise util.UserError(\"Benchmark suite check failed.\")\n\n return benchmarks",
"def cov():\n cov = coverage.coverage(\n branch=True,\n include='project/*',\n omit=\"*/__init__.py\"\n )\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print 'Coverage Summary:'\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()",
"def delete_all_benchmarks(self, namespace=\"benchmark-operator\"):\n all_benchmarks = self.crd_client.list_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\", version=\"v1alpha1\", namespace=namespace, plural=\"benchmarks\"\n )\n\n _ = [\n self.delete_benchmark(benchmark[\"metadata\"][\"name\"], namespace)\n for benchmark in all_benchmarks.get(\"items\", [])\n ]",
"def task_coverage():\n return {\n 'actions': ['py.test --cov nikola --cov-report term-missing tests/'],\n 'verbosity': 2,\n }",
"def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__",
"def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files",
"def coverage(session):\n session.install(\"coverage[toml]\", \"codecov\")\n session.run(\"coverage\", \"xml\", \"--fail-under=0\")\n session.run(\"codecov\", *session.posargs)",
"def test_list_benchmarks(client):\n # The benchmark listing contains one element (independently of whether the\n # user is logged in or not).\n r = client.get(config.API_PATH() + '/workflows')\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1\n # Create user and the request header that contains the API key for the\n # logged in user.\n _, token = create_user(client, '0000')\n headers = {HEADER_TOKEN: token}\n r = client.get(config.API_PATH() + '/workflows', headers=headers)\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1",
"def generate_coverage_reports(project, output_dir=None):\n outdir = output_dir or os.path.join(project.sdk_directory, \"coverage-results\")\n sdir = project.path\n # Make sure output dir exists and is empty:\n qisys.sh.rm(outdir)\n qisys.sh.mkdir(outdir, recursive=True)\n formats = {\"xml\": [\"--xml\"],\n \"html\": [\"--html\", \"--html-details\"]}\n for fmt, opts in formats.items():\n base_report = os.path.join(outdir, project.name + \".\" + fmt)\n cmd = [\"gcovr\",\n \"--root\", sdir,\n \"--exclude\", \".*test.*\",\n \"--exclude\", \".*external.*\",\n \"--exclude\", \".*example.*\"] + opts + \\\n [\"--output\", base_report]\n qisys.command.call(cmd, cwd=sdir, quiet=True)\n ui.info(ui.green, \"*\", ui.reset, \"Generated\", fmt.upper(),\n \"coverage report in\", ui.reset, ui.bold, base_report)",
"def get_methods(cls):\n # get all the methods that have the _benchmark_this flag\n for method in (getattr(cls, m) for m in dir(cls)):\n if hasattr(method, \"_benchmark_this\"):\n yield method",
"def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results",
"def test_get_benchmark_methods_filter(self):\n config = mock.Mock()\n config.workspace = 'workspace'\n config.benchmark_method_patterns = ['new_foo.BenchmarkClass.filter:bench.*']\n benchmark_runner = benchmark.BenchmarkRunner(config)\n\n mock_benchmark_class = mock.Mock()\n mock_benchmark_class.benchmark_method_1 = 'foo'\n\n mock_module = mock.Mock()\n sys.modules['new_foo'] = mock_module\n mock_module.BenchmarkClass.return_value = mock_benchmark_class\n\n methods = benchmark_runner._get_benchmark_methods()\n\n self.assertEqual(1, len(methods))\n self.assertEqual('new_foo.BenchmarkClass.benchmark_method_1', methods[0])",
"def get_benchmark_requirements(cls):\n pass",
"def exclude_non_cpp(benchmarks):\n return [benchmark for benchmark in benchmarks if is_cpp(benchmark)]",
"def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()",
"def run_coverage(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n session.run_always(\"pip\", \"install\", \"coverage\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[all]\")\n if sys.platform != \"win32\":\n session.run_always(\"pip\", \"install\", \"-e\", \".[odes]\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[jax]\")\n session.run(\"coverage\", \"run\", \"--rcfile=.coveragerc\", \"run-tests.py\", \"--nosub\")\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"xml\")",
"def get(self, request, format=None):\n benchmarkmodels = BenchmarkModel.objects.all()\n serializer = BenchmarkModelListSerializer(benchmarkmodels, many=True)\n return Response(serializer.data)",
"def get_list(cls, suite_model) -> list:\n suites = []\n for s in cls.SUITES:\n s[\"tests\"] = cls._get_tests(s)\n s[\"approxRunTime\"] = cls._get_average_run_time(suite_model)\n suites.append(s)\n return suites",
"def test_get_benchmark_methods_exact_match(self):\n config = mock.Mock()\n config.workspace = 'workspace'\n config.benchmark_method_patterns = [\n 'new_foo.BenchmarkClass.benchmark_method_1',\n 'new_foo.BenchmarkClass.benchmark_method_2']\n benchmark_runner = benchmark.BenchmarkRunner(config)\n\n methods = benchmark_runner._get_benchmark_methods()\n self.assertEqual(['new_foo.BenchmarkClass.benchmark_method_1',\n 'new_foo.BenchmarkClass.benchmark_method_2'], methods)"
] | [
"0.6990162",
"0.6294294",
"0.61898667",
"0.57358605",
"0.5668825",
"0.5625119",
"0.5591114",
"0.54327077",
"0.5404844",
"0.5391268",
"0.53692985",
"0.5357925",
"0.5353722",
"0.5342524",
"0.53213197",
"0.5292404",
"0.52296937",
"0.52178067",
"0.5196261",
"0.51935357",
"0.5189432",
"0.51893944",
"0.515206",
"0.5151165",
"0.51314455",
"0.5113743",
"0.5110983",
"0.5110962",
"0.5110231",
"0.5107682"
] | 0.80346346 | 0 |
Returns the list of standard bug benchmarks. | def get_bug_benchmarks():
return [
benchmark for benchmark in get_all_benchmarks()
if get_type(benchmark) == BenchmarkType.BUG.value
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_benchmarks():\n all_benchmarks = []\n for benchmark in os.listdir(BENCHMARKS_DIR):\n benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'benchmark.yaml')):\n all_benchmarks.append(benchmark)\n return sorted(all_benchmarks)",
"def get_coverage_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.CODE.value\n ]",
"def run_all(self):\n runs = []\n for run in self.benchmarks:\n run.start()\n run.wait()\n runs.append(run.metadata)\n return runs",
"def get_testbench_specs(self, tb_type: str) -> Dict[str, Any]:\n return self._specs['testbenches'][tb_type]",
"def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]",
"def list(self, classes):\n\n def add(benchmarks, parts, flags, exclude):\n if (\n flags[\"language\"] != \"C++\"\n and flags[\"language\"] != \"Java\"\n and \"--drop-caches=true\" not in parts\n ):\n parts.append(\"--drop-caches=true\")\n command = \" \".join(parts)\n if command not in exclude:\n benchmarks.append({\"command\": command, \"flags\": flags})\n\n benchmarks = []\n for name, benchmark in classes.items():\n if name.startswith(\"example\"):\n continue\n\n instance, parts = benchmark(), [name]\n\n exclude = getattr(benchmark, \"exclude\", [])\n if \"source\" in getattr(benchmark, \"arguments\", []):\n parts.append(\"ALL\")\n\n iterations = getattr(instance, \"iterations\", 3)\n parts.append(f\"--iterations={iterations}\")\n\n if instance.cases:\n parts.append(\"--all=true\")\n\n flags = getattr(instance, \"flags\", {})\n\n if getattr(instance, \"r_only\", False):\n flags[\"language\"] = \"R\"\n add(benchmarks, parts, flags, exclude)\n else:\n if \"language\" not in flags:\n flags[\"language\"] = \"Python\"\n add(benchmarks, parts, flags, exclude)\n\n if hasattr(instance, \"r_name\"):\n flags_ = flags.copy()\n flags_[\"language\"] = \"R\"\n parts.append(\"--language=R\")\n add(benchmarks, parts, flags_, exclude)\n\n return sorted(benchmarks, key=lambda k: k[\"command\"])",
"def get_benchmark_specification(benchmark = 'FSI1'):\n if benchmark == 'FSI1':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 0.2\n T_end = 60.0\n result = \"results-FSI1/\"\n elif benchmark == 'FSI2':\n rho_s = Constant(1e04)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 1.0\n T_end = 15.0\n result = \"results-FSI2/\"\t\t\n elif benchmark == 'FSI3':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(2e06)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 2.0\n T_end = 20.0\n result = \"results-FSI3/\"\t\t\n else:\n raise ValueError('\"{}\" is a wrong name for problem specification.'.format(benchmark))\n v_max = Constant(1.5*U) # mean velocity to maximum velocity \n # (we have parabolic profile)\n E_s = Constant(2*mu_s*(1+nu_s))\n lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))\n mu_f = Constant(nu_f*rho_f)\n return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result",
"def get_performance_test_cases(test_suite):\n return get_cases(test_suite, r'test_perf_')",
"def exclude_non_cpp(benchmarks):\n return [benchmark for benchmark in benchmarks if is_cpp(benchmark)]",
"def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)",
"def _parameterize_simple_benchmarks():\n parameterized_tuples = []\n for index, simple_benchmark in enumerate(\n all_benchmarks.all_benchmarks(modules=[simple_benchmarks])):\n # The index ensures all test cases have distinct names, even if multiple\n # benchmarks have the same name.\n test_case_name = '{index}_{name}'.format(index=index,\n name=simple_benchmark.name)\n parameterized_tuples.append((test_case_name, simple_benchmark))\n return parameterized_tuples",
"def print_scenario(benchmarks, name):\n # Remember, the first entry in the times array is an empty object.\n\n benchmarks = sorted(benchmarks, key = benchmark_sort_key)\n for benchmark in benchmarks:\n block_times = benchmark[\"times\"][2][\"block_times\"] # list of floats\n it = iter(block_times)\n for start in it:\n end = next(it)\n\n print(\"%s , %.3f \" % (benchmark[\"label\"], float(end) - float(start)))",
"def delete_all_benchmarks(self, namespace=\"benchmark-operator\"):\n all_benchmarks = self.crd_client.list_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\", version=\"v1alpha1\", namespace=namespace, plural=\"benchmarks\"\n )\n\n _ = [\n self.delete_benchmark(benchmark[\"metadata\"][\"name\"], namespace)\n for benchmark in all_benchmarks.get(\"items\", [])\n ]",
"def _disc_benchmarks(cls, conf, repo, environments, commit_hashes, check):\n root = conf.benchmark_dir\n\n cls.check_tree(root)\n\n if len(environments) == 0:\n raise util.UserError(\"No available environments\")\n\n # Try several different commits:\n #\n # - First of commit_hashes provided\n # - Tips of branches from configuration file\n # - Rest of the commit_hashes\n #\n\n def iter_hashes():\n for h in commit_hashes[:1]:\n yield h\n for branch in conf.branches:\n try:\n yield repo.get_hash_from_name(branch)\n except NoSuchNameError:\n continue\n for h in commit_hashes[1:]:\n yield h\n\n def iter_unique(iter):\n seen = set()\n for item in iter:\n if item not in seen:\n seen.add(item)\n yield item\n\n try_hashes = iter_unique(iter_hashes())\n\n log.info(\"Discovering benchmarks\")\n with log.indent():\n last_err = None\n for env, commit_hash in itertools.product(environments, try_hashes):\n env.create()\n\n if last_err is not None:\n log.warning(\"Failed: trying different commit/environment\")\n\n result_dir = tempfile.mkdtemp()\n try:\n env.install_project(conf, repo, commit_hash)\n\n env_vars = dict(os.environ)\n env_vars.update(env.env_vars)\n\n result_file = os.path.join(result_dir, 'result.json')\n env.run(\n [runner.BENCHMARK_RUN_SCRIPT, 'discover',\n os.path.abspath(root),\n os.path.abspath(result_file)],\n cwd=result_dir,\n env=env_vars,\n dots=False)\n\n try:\n with open(result_file, 'r') as fp:\n benchmarks = json.load(fp)\n except (IOError, ValueError):\n log.error(\"Invalid discovery output\")\n raise util.UserError()\n\n break\n except (util.UserError, util.ProcessError) as err:\n last_err = err\n continue\n except KeyboardInterrupt:\n raise util.UserError(\"Interrupted.\")\n finally:\n util.long_path_rmtree(result_dir)\n else:\n raise util.UserError(\"Failed to build the project and import the benchmark suite.\")\n\n if check:\n log.info(\"Checking benchmarks\")\n with log.indent():\n result_dir = tempfile.mkdtemp()\n try:\n out, err, retcode = env.run(\n [runner.BENCHMARK_RUN_SCRIPT, 'check',\n os.path.abspath(root)],\n cwd=result_dir,\n dots=False,\n env=env_vars,\n valid_return_codes=None,\n return_stderr=True,\n redirect_stderr=True)\n finally:\n util.long_path_rmtree(result_dir)\n\n out = out.strip()\n if retcode == 0:\n if out:\n log.info(out)\n log.info(\"No problems found.\")\n else:\n if out:\n log.error(out)\n raise util.UserError(\"Benchmark suite check failed.\")\n\n return benchmarks",
"def get_functional_test_cases(test_suite):\n return get_cases(test_suite, r'test_(?!perf_)')",
"def main():\n parser = optparse.OptionParser()\n parser.add_option('--debug', action='store_true', default=False,\n help='run in debug mode')\n parser.add_option('-i', '--iteration', type=int, default=DEFAULT_ITERATION,\n metavar='NUM',\n help='set the number of iterations for each test (defualt:%d)' % \\\n DEFAULT_ITERATION)\n parser.add_option('-f', '--fstypes', default='ext2,ext3,ext4,btrfs,xfs',\n type='string', metavar='TYPES', help='set the file systems to test')\n parser.add_option('-n', '--num', default=10000, type=int, metavar='NUM',\n help='set the number of file created')\n parser.add_option('-N', '--numa', action='store_true', default=False,\n help='run NUMA test')\n parser.add_option('-S', '--scalability', action='store_true', default=False,\n help='run scalability test')\n global options\n options, args = parser.parse_args()\n\n benchutils.check_root_or_die()\n suffix = ''\n if options.numa:\n suffix = 'numa'\n else:\n suffix = 'scale'\n output_dir = benchutils.get_output_directory(suffix=suffix, timestamp=True)\n fstypes = options.fstypes.split(',')\n for fs in fstypes:\n if options.numa:\n run_tests(output_dir, fs)\n elif options.scalability:\n run_scalability_tests(output_dir, fs)",
"def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret",
"def main():\r\n algos = [merge_sort, quick_sort, heap_sort, radix_sort, bucket_sort_general]\r\n array_sizes = [5000, 10000, 15000, 20000, 50000, 75000, 100000, 150000]\r\n results = {algo.__name__: [] for algo in algos}\r\n for algo in algos:\r\n result = []\r\n for size in array_sizes:\r\n time = test(algo, size)\r\n result.append(time)\r\n results[algo.__name__] = result\r\n\r\n display_results(results, array_sizes)",
"def generate_benchmarks(self, num, graphs = []):\n if num != 0:\n try:\n print(\"{0} graphs left\".format(num), end='\\r')\n graphs.append(self.generate_lfr_benchmark())\n return self.generate_benchmarks(num-1, graphs)\n except nx.exception.ExceededMaxIterations:\n return self.generate_benchmarks(num, graphs)\n else:\n return graphs",
"def test():\n\t\treturn [\"vice.core.dataframe\",\n\t\t\t[\n\t\t\t\ttests.test(run = False),\n\t\t\t\t_builtin_dataframes.test(run = False)\n\t\t\t]\n\t\t]",
"def get_benchmark_requirements(cls):\n pass",
"def list_test_cases(program):\n\n return list(INFO[program].test_cases)",
"def test_list_benchmarks(client):\n # The benchmark listing contains one element (independently of whether the\n # user is logged in or not).\n r = client.get(config.API_PATH() + '/workflows')\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1\n # Create user and the request header that contains the API key for the\n # logged in user.\n _, token = create_user(client, '0000')\n headers = {HEADER_TOKEN: token}\n r = client.get(config.API_PATH() + '/workflows', headers=headers)\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1",
"def main(r_min=1, r_max=101):\n fizz_buzz_all = (\n fizz_buzz1(r_min, r_max),\n fizz_buzz2(r_min, r_max),\n fizz_buzz3(r_min, r_max)\n )\n for f in fizz_buzz_all:\n start = time.time()\n print(*f, sep='\\n')\n end = time.time()\n print(\"Execution time: \", end - start)",
"def manquant(suite):\n manque = []\n for i in range(1, 10):\n if i not in suite:\n manque.append(i)\n return manque",
"def list_runtimes(self, workbench):\n pass",
"def Cleanup(benchmark_spec):\n pass",
"def benchmark():\n print defer.Deferred.__module__\n for func, args, iter in benchmarkFuncs:\n print func.__name__, args, timeit(func, iter, *args)",
"def tests_generator(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n cmd_list = [\n (\"Initial Create/Compile/Read Compiled Tree\", \"{0} -D {1} -i 10 --makej -s {2}\"),\n ]\n\n tests = []\n for idx, (desc, cmd) in enumerate(cmd_list):\n test_name = \"compile_bench_{0}_{1}\".format(idx + 1, to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=cmd.format(cb_bin, self.test_path, bin_path))\n tests.append(test)\n return tests",
"def assert_tests_stable(tests):\n return (stress_test(t) for t in tests)"
] | [
"0.6447797",
"0.58259064",
"0.5799858",
"0.5799062",
"0.5770896",
"0.5712318",
"0.5706508",
"0.5695553",
"0.5673421",
"0.5479634",
"0.54711425",
"0.5403768",
"0.5383399",
"0.5374102",
"0.5373461",
"0.5352589",
"0.5352316",
"0.5337508",
"0.5330979",
"0.53233963",
"0.5322599",
"0.5299671",
"0.52748275",
"0.5273337",
"0.52399105",
"0.5226855",
"0.5219952",
"0.51898926",
"0.51733243",
"0.51176226"
] | 0.8056906 | 0 |
Returns True if |benchmark| is written in C/C++. | def is_cpp(benchmark):
return get_language(benchmark) == 'c++' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _c_optimizations_required():\n pure_env = os.environ.get('PURE_PYTHON')\n require_c = pure_env == \"0\"\n return require_c",
"def _should_attempt_c_optimizations():\n if PYPY:\n return False\n\n if _c_optimizations_required():\n return True\n return not _c_optimizations_ignored()",
"def supports_refcounts(self):\n return sys.implementation.name == \"cpython\"",
"def _c_optimizations_ignored():\n pure_env = os.environ.get('PURE_PYTHON')\n return pure_env != \"0\" if pure_env is not None else PYPY",
"def test_rust_code_analysis_tokei_c() -> None:\n\n ret_value = compare(\n \"rust-code-analysis\",\n \"tokei\",\n [\"-g\", \"-f\"],\n [\"SLOC\", \"PLOC\", \"CLOC\", \"BLANK\"],\n \"C\",\n \"bubble_sort.c\",\n )\n\n assert ret_value == 0",
"def is_cxx_compiler():\n\n wrapper_command = os.path.basename(sys.argv[0])\n return re.match(r'(.+)c\\+\\+(.*)', wrapper_command)",
"def is_library(code):\n return 3000 <= code <= 3999",
"def test_py_compile_condition(self):\n self._test_py_compile('coin')",
"def Cpp_test():\n pass",
"def _c_optimizations_available(module_name):\n import importlib\n catch = () if _c_optimizations_required() else (ImportError,)\n try:\n return importlib.import_module('BTrees._' + module_name)\n except catch: # pragma: no cover\n return False",
"def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:\n if isinstance(compiler, (AppleClangCCompiler, AppleClangCPPCompiler)):\n if apple_minver is None:\n return False\n return version_compare(compiler.version, apple_minver)\n return version_compare(compiler.version, minver)",
"def has_flag(compiler, flagname):\n import tempfile\n with tempfile.NamedTemporaryFile('w', suffix='.cc') as f:\n f.write('int main (int argc, char **argv) { return 0; }')\n try:\n compiler.compile([f.name], extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n return True",
"def CustomCFlagCheck(context, flag, append=True):\n context.Message(\"Checking if C compiler supports \" + flag + \" flag \")\n ccflags = context.env[\"CCFLAGS\"]\n context.env.Append(CCFLAGS=flag)\n result = context.TryCompile(\"int main(int argc, char **argv) { return 0; }\", \".c\")\n context.Result(result)\n if not append or not result:\n context.env.Replace(CCFLAGS=ccflags)\n return result",
"def _supported_compilers_available():\n return _supported_gcc_available()[0] and supported_nvcc_available()[0]",
"def has_flag(compiler, flagname):\n import tempfile\n with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\n f.write('int main (int argc, char **argv) { return 0; }')\n try:\n compiler.compile([f.name], extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n return True",
"def has_flag(compiler, flagname):\n import tempfile\n\n with tempfile.NamedTemporaryFile(\"w\", suffix=\".cpp\") as f:\n f.write(\"int main (int argc, char **argv) { return 0; }\")\n try:\n compiler.compile([f.name], extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n return True",
"def test_clang_cxx(self):\n self.assertEqual(\n self.ndk.clang_cxx,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/clang++\",\n )",
"def has_flag(compiler, flagname):\n with tempfile.TemporaryDirectory() as tmpdir:\n fname = join(tmpdir, \"test.cpp\")\n with open(fname, \"w\") as fp:\n fp.write(\"int main (int argc, char **argv) { return 0; }\")\n try:\n compiler.compile([fname], output_dir=tmpdir, extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n return True",
"def test_make_benchmark_single_ll():\n benchmark = llvm.make_benchmark(INVALID_IR_PATH)\n assert str(benchmark.uri).startswith(\"benchmark://user-v0/\")\n assert benchmark.uri.scheme == \"benchmark\"\n assert benchmark.uri.dataset == \"user-v0\"",
"def exclude_non_cpp(benchmarks):\n return [benchmark for benchmark in benchmarks if is_cpp(benchmark)]",
"def supports_c_code(self, inputs):\r\n\r\n # If we don't even have the right method, we certainly\r\n # don't support the C code\r\n # (This is the test that used to be implemented by\r\n # local_gpu_sum)\r\n pattern = (''.join(str(i) for i in self.reduce_mask))\r\n if not hasattr(self, 'c_code_reduce_%s' % pattern):\r\n return False\r\n\r\n # Now that this is a general reduction op, we might\r\n # have a method for a pattern, but that pattern\r\n # might not be implemented for the current scalar op.\r\n # To detect this more complicated situation, we\r\n # make fake arguments to c_code, try to run them,\r\n # and see if NotImplementedError gets raised.\r\n\r\n node = self.make_node(*inputs)\r\n\r\n name = 'fake_name'\r\n\r\n inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))]\r\n out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))]\r\n\r\n sub = {'fail': 'fake failure code'}\r\n\r\n try:\r\n self.c_code(node, name, inp, out, sub)\r\n self.c_support_code_apply(node, name)\r\n except NotImplementedError:\r\n return False\r\n return True",
"def supports_c_code(self, inputs):\r\n\r\n # If we don't even have the right method, we certainly\r\n # don't support the C code\r\n # (This is the test that used to be implemented by\r\n # local_gpu_sum)\r\n pattern = (''.join(str(i) for i in self.reduce_mask))\r\n if not hasattr(self, 'c_code_reduce_%s' % pattern):\r\n return False\r\n\r\n # Now that this is a general reduction op, we might\r\n # have a method for a pattern, but that pattern\r\n # might not be implemented for the current scalar op.\r\n # To detect this more complicated situation, we\r\n # make fake arguments to c_code, try to run them,\r\n # and see if NotImplementedError gets raised.\r\n\r\n node = self.make_node(*inputs)\r\n\r\n name = 'fake_name'\r\n\r\n inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))]\r\n out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))]\r\n\r\n sub = {'fail': 'fake failure code'}\r\n\r\n try:\r\n self.c_code(node, name, inp, out, sub)\r\n self.c_support_code_apply(node, name)\r\n except NotImplementedError:\r\n return False\r\n return True",
"def has_flag(compiler, flagname):\n import tempfile\n import os\n with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f:\n f.write('int main (int argc, char **argv) { return 0; }')\n fname = f.name\n try:\n compiler.compile([fname], extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n finally:\n try:\n os.remove(fname)\n except OSError:\n pass\n return True",
"def test_make_benchmark_from_command_line_mixed_source_and_object_files(\n env: LlvmEnv, retcode: int\n):\n with temporary_working_directory():\n with open(\"a.c\", \"w\") as f:\n f.write(\n \"\"\"\n#include \"b.h\"\n\nint A() {\n return B();\n}\n\nint main() {\n return A();\n}\n\"\"\"\n )\n\n with open(\"b.c\", \"w\") as f:\n f.write(f\"int B() {{ return {retcode}; }}\")\n\n with open(\"b.h\", \"w\") as f:\n f.write(\"int B();\")\n\n # Compile b.c to object file:\n subprocess.check_call([str(llvm_paths.clang_path()), \"b.c\", \"-c\"], timeout=60)\n assert (Path(\"b.o\")).is_file()\n\n bm = env.make_benchmark_from_command_line([\"gcc\", \"a.c\", \"b.o\", \"-o\", \"foo\"])\n env.reset(benchmark=bm)\n\n bm.compile(env)\n assert Path(\"foo\").is_file()\n\n p = subprocess.Popen([\"./foo\"])\n p.communicate(timeout=60)\n assert p.returncode == retcode",
"def check_java(interface):\n\n interface.info(__(\"I'm compiling a short test program, to see if you have a working JDK on your system.\"))\n\n if not run_slow(interface, plat.javac, plat.path(\"buildlib/CheckJDK8.java\"), use_path=True):\n interface.fail(__(\"I was unable to use javac to compile a test file. If you haven't installed the Java Development Kit yet, please download it from:\\n\\nhttp://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html\\n\\nThe JDK is different from the JRE, so it's possible you have Java without having the JDK. Without a working JDK, I can't continue.\"))\n\n if not run_slow(interface, plat.java, \"-classpath\", plat.path(\"buildlib\"), \"CheckJDK8\", use_path=True):\n interface.fail(__(\"The version of Java on your computer does not appear to be JDK 8, which is the only version supported by the Android SDK. If you need to install JDK 8, you can download it from:\\n\\nhttp://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html\\n\\nYou can also set the JAVA_HOME environment variable to use a different version of Java.\"))\n\n interface.success(__(\"The JDK is present and working. Good!\"))",
"def py_versiontest(c):\n pass",
"def CustomCompileCheck(context, message, source, extension=\".cc\"):\n context.Message(message)\n\n env = context.env\n if env.GetOption(\"clean\") or env.GetOption(\"help\") or env.GetOption(\"no_exec\"):\n result = True\n else:\n result = context.TryCompile(source, extension)\n\n context.Result(result)\n\n return result",
"def check_cc(self):\n # simply generates a C program containing a couple of calls\n # to MPI routines and checks if the compilation and execution\n # are succesful\n print 'Checking if cc works...',\n sys.stdout.flush()\n # generate\n writefile('tmpc.c',\"\"\"\n #include <stdio.h>\n int main(int argc, char **argv){\n int iam;\n fprintf(stdout, \\\"success\\\" );fflush(stdout);\n return 0;\n }\\n\"\"\")\n\n # compile\n ccomm = self.config.cc+\" \"+self.config.ccflags+\" \"+self.config.ldflags_c+\" -o tmpc \"+os.path.join(os.getcwd(),\"tmpc.c\")\n (output, error, retz) = runShellCommand(ccomm)\n\n if retz:\n print '\\n\\nCOMMON: C compiler not working! aborting...'\n print 'stderr:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n # run\n comm = './tmpc'\n (output, error, retz) = runShellCommand(comm)\n if retz:\n print '\\n\\nCOMMON: cc not working! aborting...'\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n # cleanup\n killfiles(['tmpc.c','tmpc'])\n print 'yes'\n return 0;",
"def numba_check():\n numba = importlib.util.find_spec(\"numba\")\n return numba is not None",
"def is_ctu_capable():\n\n context = package_context.get_context()\n ctu_func_map_cmd = context.ctu_func_map_cmd\n try:\n version = subprocess.check_output([ctu_func_map_cmd, '-version'])\n except (subprocess.CalledProcessError, OSError):\n version = 'ERROR'\n return version != 'ERROR'"
] | [
"0.6263037",
"0.6229608",
"0.6097612",
"0.6020831",
"0.5982895",
"0.58478355",
"0.57693976",
"0.57392836",
"0.5669367",
"0.55203027",
"0.5460529",
"0.53889805",
"0.5354336",
"0.5303164",
"0.5300068",
"0.5299565",
"0.5283687",
"0.5242149",
"0.52138",
"0.5201266",
"0.5121601",
"0.5121601",
"0.51187706",
"0.5099935",
"0.5085485",
"0.507701",
"0.50553465",
"0.5052362",
"0.50408286",
"0.5039025"
] | 0.82368374 | 0 |
Returns |benchmarks| with only benchmarks written in C/C++. | def exclude_non_cpp(benchmarks):
return [benchmark for benchmark in benchmarks if is_cpp(benchmark)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_coverage_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.CODE.value\n ]",
"def get_bug_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.BUG.value\n ]",
"def get_all_benchmarks():\n all_benchmarks = []\n for benchmark in os.listdir(BENCHMARKS_DIR):\n benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'benchmark.yaml')):\n all_benchmarks.append(benchmark)\n return sorted(all_benchmarks)",
"def list(self, classes):\n\n def add(benchmarks, parts, flags, exclude):\n if (\n flags[\"language\"] != \"C++\"\n and flags[\"language\"] != \"Java\"\n and \"--drop-caches=true\" not in parts\n ):\n parts.append(\"--drop-caches=true\")\n command = \" \".join(parts)\n if command not in exclude:\n benchmarks.append({\"command\": command, \"flags\": flags})\n\n benchmarks = []\n for name, benchmark in classes.items():\n if name.startswith(\"example\"):\n continue\n\n instance, parts = benchmark(), [name]\n\n exclude = getattr(benchmark, \"exclude\", [])\n if \"source\" in getattr(benchmark, \"arguments\", []):\n parts.append(\"ALL\")\n\n iterations = getattr(instance, \"iterations\", 3)\n parts.append(f\"--iterations={iterations}\")\n\n if instance.cases:\n parts.append(\"--all=true\")\n\n flags = getattr(instance, \"flags\", {})\n\n if getattr(instance, \"r_only\", False):\n flags[\"language\"] = \"R\"\n add(benchmarks, parts, flags, exclude)\n else:\n if \"language\" not in flags:\n flags[\"language\"] = \"Python\"\n add(benchmarks, parts, flags, exclude)\n\n if hasattr(instance, \"r_name\"):\n flags_ = flags.copy()\n flags_[\"language\"] = \"R\"\n parts.append(\"--language=R\")\n add(benchmarks, parts, flags_, exclude)\n\n return sorted(benchmarks, key=lambda k: k[\"command\"])",
"def get_benchmark_requirements(cls):\n pass",
"def is_cpp(benchmark):\n return get_language(benchmark) == 'c++'",
"def count_benchmarks():\n return len(setup_storage().fetch_benchmark({}))",
"def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret",
"def filter_out(self, skip):\n benchmarks = super(Benchmarks, self).__new__(self.__class__)\n benchmarks._conf = self._conf\n benchmarks._benchmark_dir = self._benchmark_dir\n benchmarks._all_benchmarks = self._all_benchmarks\n\n selected_idx = {}\n\n for name, benchmark in self.items():\n if name not in skip:\n benchmarks[name] = benchmark\n if name in self._benchmark_selection:\n selected_idx[name] = self._benchmark_selection[name]\n\n benchmarks._benchmark_selection = selected_idx\n\n return benchmarks",
"def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())",
"def test_rust_code_analysis_tokei_c() -> None:\n\n ret_value = compare(\n \"rust-code-analysis\",\n \"tokei\",\n [\"-g\", \"-f\"],\n [\"SLOC\", \"PLOC\", \"CLOC\", \"BLANK\"],\n \"C\",\n \"bubble_sort.c\",\n )\n\n assert ret_value == 0",
"def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)",
"def time_binaries():\n print()\n print(\"=== BENCHMARKING ===\")\n with tempfile.NamedTemporaryFile(\n prefix=\"riff-benchmark\", suffix=\".gitlog\"\n ) as testdata:\n subprocess.check_call(\n [\"git\", \"log\", \"--color=always\", \"-p\", \"master\"], stdout=testdata\n )\n\n binaries = sorted(glob.glob(os.path.join(BINDIR, \"*\")), key=natural_keys)\n\n # Do riff-current last: https://stackoverflow.com/a/20320940/473672\n binaries.sort(key=lambda s: s.endswith(\"riff-current\"))\n\n for binary in binaries:\n print_timings(binary, testdata.name)\n print_timings(\"/bin/cat\", testdata.name)",
"def test_benchmark1(capsys):\n student_1 = Student('114007245','Mario Castillo',2018, [\"INST 201\",\"INST 326\"])\n student_2 = Student('117006012', 'Joe Rogan', 2018, [\"MATH 115\",\"PSYC 100\"])\n student_3 = Student(\"117008490\", \"Kanye West\", 2018, [\"MATH 120\",\"STAT 003\"])\n student_4 = Student('118009044', \"Elon Musk\", 2018, [\"PSYC 100\",\"MATH 003\"])\n \n student_1.benchmark_I()\n outerr = capsys.readouterr()\n out = outerr.out\n assert out == ('You have not completed the Benchmark I requirements.\\n'\n 'You have not taken MATH 115 or higher.\\n'\n 'You have not taken PSYC 100.\\n')\n\n student_2.benchmark_I()\n outerr = capsys.readouterr()\n out = outerr.out\n assert out == (f'You have completed all of your Benchmark I courses! '\n f'Congratulations, {student_2.student_name}!\\n')\n\n student_3.benchmark_I()\n outerr = capsys.readouterr()\n out = outerr.out \n assert out == ('You have not completed the Benchmark I requirements.\\n'\n 'You have not taken PSYC 100.\\n')\n\n student_4.benchmark_I()\n outerr = capsys.readouterr()\n out = outerr.out\n assert out == ('You have not completed the Benchmark I requirements.\\n'\n 'You have not taken MATH 115 or higher.\\n')",
"def Cleanup(benchmark_spec):\n pass",
"def delete_all_benchmarks(self, namespace=\"benchmark-operator\"):\n all_benchmarks = self.crd_client.list_namespaced_custom_object(\n group=\"ripsaw.cloudbulldozer.io\", version=\"v1alpha1\", namespace=namespace, plural=\"benchmarks\"\n )\n\n _ = [\n self.delete_benchmark(benchmark[\"metadata\"][\"name\"], namespace)\n for benchmark in all_benchmarks.get(\"items\", [])\n ]",
"def test_make_benchmark_from_command_line_multiple_input_sources(\n env: LlvmEnv, retcode: int\n):\n with temporary_working_directory() as cwd:\n with open(\"a.c\", \"w\") as f:\n f.write(\"int main() { return B(); }\")\n\n with open(\"b.c\", \"w\") as f:\n f.write(f\"int B() {{ return {retcode}; }}\")\n\n bm = env.make_benchmark_from_command_line([\"gcc\", \"a.c\", \"b.c\", \"-o\", \"foo\"])\n assert not (cwd / \"foo\").is_file()\n\n env.reset(benchmark=bm)\n assert \"main()\" in env.ir\n\n bm.compile(env)\n assert (cwd / \"foo\").is_file()\n\n p = subprocess.Popen([\"./foo\"])\n p.communicate(timeout=60)\n assert p.returncode == retcode",
"def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test",
"def run_benchmarks(urls, urlIndices, trial_number):\n path.append(os.path.join(CHROMIUM_SRC, 'tools/perf/'))\n benchmark_path = os.path.join(CHROMIUM_SRC, 'tools/perf/run_benchmark')\n output_path = 'temp'\n trial_key = 'trial{0}'.format(trial_number)\n\n cmd = ('sudo ' + benchmark_path + ' --profiler=trace telemetryBenchmarks.url{0}')\n for i in urlIndices:\n try:\n out, err, returncode = get_benchmark_result(cmd.format(i))\n timeout = False\n print 'successfully ran benchmark for url' + str(i)\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n print 'Benchmark {0} failed'.format(i)\n print 'return code is ' + str(returncode)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' 
+ str(trial_number)\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))\n\n\n ############### Now run for Perfect Cache file ################\n\n try:\n out, err, returncode = \\\n get_benchmark_result(cmd.format(str(i) + '_pc'))\n timeout = False\n print 'successfully ran benchmark for url' + str(i) + '_pc'\n except TimeoutError:\n # Benchmark failed\n print 'Benchmark Timeout!'\n out = ''\n returncode = 1\n timeout = True\n\n failed = ['FAILED']\n if returncode != 0 or any(x in out for x in failed) or timeout:\n # If a benchmark fails, remove its corresponding wpr file, and act\n # as if it didn't exist\n # Remove from data/wpr_source\n\n print 'Benchmark {0}_pc failed'.format(i)\n print 'Out:'\n print out\n print 'Err:'\n print err\n urlName = 'url{0}_page_set_000.wpr'.format(i)\n urlpcName = 'url{0}_pc_page_set_000.wpr'.format(i)\n urlFilePath = os.path.join('data/wpr_source',urlName)\n urlpcFilePath = os.path.join('data/wpr_source',urlpcName)\n urlCmd = 'rm -f {0}'.format(urlFilePath)\n urlpcCmd = 'rm -f {0}'.format(urlpcFilePath)\n print 'Removing: {0}, {1}'.format(urlFilePath, urlpcFilePath)\n commands = [\n 'rm -f {0}'.format(urlFilePath),\n 'rm -f {0}'.format(urlpcFilePath)\n ]\n for cmdss in commands:\n p = Popen(cmdss, shell=True)\n p.wait()\n # Skip the rest of this url\n print \"Moving on!\"\n continue\n\n # Parse data\n tmp_path = 'temp/tmp_benchmark_result_json'\n with open(tmp_path, 'rb') as f:\n tmp_json = json.load(f)\n benchmark_results = tmp_json['values']\n\n commands = [\n 'rm -f ~/page_load_time/telemetry/temp/tmp_benchmark_result_json',\n ]\n for cmds in commands:\n p = Popen(cmds, shell=True)\n p.wait()\n\n output = {urls[i]: {'cold_times': {trial_key: benchmark_results}}}\n output_file = os.path.join(output_path, urlsafe_b64encode(urls[i]))\n output_file += '.' + str(trial_number) + '.pc'\n try:\n with open(output_file, 'w') as f:\n json.dump(output, f)\n except IOError:\n raise IOError('Unable to write to {0}'.format(output_file))",
"def _c_optimizations_ignored():\n pure_env = os.environ.get('PURE_PYTHON')\n return pure_env != \"0\" if pure_env is not None else PYPY",
"def _c_optimizations_required():\n pure_env = os.environ.get('PURE_PYTHON')\n require_c = pure_env == \"0\"\n return require_c",
"def benchmark(self):\n nsites = []\n for m in self.methods:\n for name, structure in self.test_structures.items():\n cns = []\n if self.unique_sites:\n es = SpacegroupAnalyzer(structure).get_symmetrized_structure().equivalent_sites\n sites = [structure.index(x[0]) for x in es]\n else:\n sites = range(len(structure))\n\n for key, val in self.hi.items():\n if name == key:\n for j in sites:\n if isinstance(m, NearNeighbors):\n tmpcn = m.get_cn_dict(structure, j, self.use_weights)\n else:\n tmpcn = m.compute(structure, j)\n if tmpcn == \"null\":\n continue\n if self.nround:\n self._roundcns(tmpcn, self.nround)\n cns.append((structure[j].species_string, tmpcn))\n if self.cation_anion:\n for mat, cat in self.cations.items():\n if (name == mat) and cat:\n cns = self._popel(cns, cat)\n elif self.anion_cation:\n for mat, an in self.anions.items():\n if name == mat:\n cns = self._popel(cns, an)\n m._cns[name] = cns\n nsites.append(len(cns))\n self.nsites = max(nsites)",
"def _disc_benchmarks(cls, conf, repo, environments, commit_hashes, check):\n root = conf.benchmark_dir\n\n cls.check_tree(root)\n\n if len(environments) == 0:\n raise util.UserError(\"No available environments\")\n\n # Try several different commits:\n #\n # - First of commit_hashes provided\n # - Tips of branches from configuration file\n # - Rest of the commit_hashes\n #\n\n def iter_hashes():\n for h in commit_hashes[:1]:\n yield h\n for branch in conf.branches:\n try:\n yield repo.get_hash_from_name(branch)\n except NoSuchNameError:\n continue\n for h in commit_hashes[1:]:\n yield h\n\n def iter_unique(iter):\n seen = set()\n for item in iter:\n if item not in seen:\n seen.add(item)\n yield item\n\n try_hashes = iter_unique(iter_hashes())\n\n log.info(\"Discovering benchmarks\")\n with log.indent():\n last_err = None\n for env, commit_hash in itertools.product(environments, try_hashes):\n env.create()\n\n if last_err is not None:\n log.warning(\"Failed: trying different commit/environment\")\n\n result_dir = tempfile.mkdtemp()\n try:\n env.install_project(conf, repo, commit_hash)\n\n env_vars = dict(os.environ)\n env_vars.update(env.env_vars)\n\n result_file = os.path.join(result_dir, 'result.json')\n env.run(\n [runner.BENCHMARK_RUN_SCRIPT, 'discover',\n os.path.abspath(root),\n os.path.abspath(result_file)],\n cwd=result_dir,\n env=env_vars,\n dots=False)\n\n try:\n with open(result_file, 'r') as fp:\n benchmarks = json.load(fp)\n except (IOError, ValueError):\n log.error(\"Invalid discovery output\")\n raise util.UserError()\n\n break\n except (util.UserError, util.ProcessError) as err:\n last_err = err\n continue\n except KeyboardInterrupt:\n raise util.UserError(\"Interrupted.\")\n finally:\n util.long_path_rmtree(result_dir)\n else:\n raise util.UserError(\"Failed to build the project and import the benchmark suite.\")\n\n if check:\n log.info(\"Checking benchmarks\")\n with log.indent():\n result_dir = tempfile.mkdtemp()\n try:\n out, err, retcode = env.run(\n [runner.BENCHMARK_RUN_SCRIPT, 'check',\n os.path.abspath(root)],\n cwd=result_dir,\n dots=False,\n env=env_vars,\n valid_return_codes=None,\n return_stderr=True,\n redirect_stderr=True)\n finally:\n util.long_path_rmtree(result_dir)\n\n out = out.strip()\n if retcode == 0:\n if out:\n log.info(out)\n log.info(\"No problems found.\")\n else:\n if out:\n log.error(out)\n raise util.UserError(\"Benchmark suite check failed.\")\n\n return benchmarks",
"def test_get_benchmark_methods_filter(self):\n config = mock.Mock()\n config.workspace = 'workspace'\n config.benchmark_method_patterns = ['new_foo.BenchmarkClass.filter:bench.*']\n benchmark_runner = benchmark.BenchmarkRunner(config)\n\n mock_benchmark_class = mock.Mock()\n mock_benchmark_class.benchmark_method_1 = 'foo'\n\n mock_module = mock.Mock()\n sys.modules['new_foo'] = mock_module\n mock_module.BenchmarkClass.return_value = mock_benchmark_class\n\n methods = benchmark_runner._get_benchmark_methods()\n\n self.assertEqual(1, len(methods))\n self.assertEqual('new_foo.BenchmarkClass.benchmark_method_1', methods[0])",
"def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,\n verbose, android, save_traces):\n timeout = duration_seconds + _EXTRA_TIMEOUT\n benchmark_args = []\n benchmark_args.append('--app=' + app)\n benchmark_args.append('--duration=' + str(duration_seconds))\n\n output_file = None\n device_output_file = None\n if save_traces:\n output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'),\n time.strftime('%Y%m%d%H%M%S'))\n if android:\n device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)\n benchmark_args.append('--trace-output=' + device_output_file)\n else:\n benchmark_args.append('--trace-output=' + output_file)\n\n for measurement in measurements:\n benchmark_args.append(measurement)\n\n shell_args = list(shell_args)\n shell_args.append(_BENCHMARK_APP)\n shell_args.append('--force-offline-by-default')\n shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,\n ' '.join(benchmark_args)))\n\n if verbose:\n print 'shell arguments: ' + str(shell_args)\n return_code, output, did_time_out = shell.run_and_get_output(\n shell_args, timeout=timeout)\n\n if did_time_out:\n return False, 'timed out', output\n if return_code:\n return False, 'return code: ' + str(return_code), output\n\n # Pull the trace file even if some measurements are missing, as it can be\n # useful in debugging.\n if device_output_file:\n shell.pull_file(device_output_file, output_file, remove_original=True)\n\n return True, None, output",
"def get_performance_test_cases(test_suite):\n return get_cases(test_suite, r'test_perf_')",
"def main():\n test_folders = argv[1]\n benchmark_file = argv[2]\n output_path = argv[3]\n\n method = ['n', 'a', 'a']\n bigram = ['False', 'False', 'True']\n output_file = output_path + '/' + 'method_comparison_cosine_values.csv'\n\n with open(test_folders, 'r') as f:\n test_folders = f.read()\n\n test_folders = test_folders.splitlines()\n\n with open(benchmark_file, 'r') as f:\n benchmark_file = f.read()\n\n benchmark_file = benchmark_file.splitlines()\n\n # initialize big data frame\n frames = []\n\n for k in xrange(len(benchmark_file)):\n\n test = str(test_folders[k]).replace('\"', '')\n print \"Reading test files from folder:\"\n print test\n\n benchmark = str(benchmark_file[k]).replace('\"', '')\n print \"Reading benchmark form file:\"\n print benchmark\n\n # read file paths from test documents folder\n query = sorted([os.path.join(test, f) for f in os.listdir(test) if f.endswith('.txt')])\n\n # load benchmark text file\n with open(benchmark, \"r\", encoding=\"utf-8\", errors='ignore') as doc:\n raw = doc.read()\n\n # initialize dict of dicts for data frame\n method_csv = {}\n\n for j in xrange(len(method)):\n # extract features from benchmark\n dtm = ExtractFeatures(method[j], bigram[j])\n benchmark_name = benchmark_file[k].split('\\\\')[-1]\n benchmark_features = dtm.extract_features_from_text(raw, benchmark_name)\n\n # extract terms from each text document to create a vocabulary (keeping unique terms only)\n vocabulary = sorted(set(w[1] for w in benchmark_features))\n print \"{0} features produced.\".format(str(len(vocabulary)))\n\n benchmark_dtv = DTM(vocabulary, benchmark_name, benchmark_features)\n benchmark_dtv = benchmark_dtv.compute_dtv()\n\n # load test document features\n test_features = []\n for q in query:\n dtm1 = ExtractFeatures(method[j], bigram[j])\n test_features = test_features + dtm1.extract_features_from_file(q)\n\n documents = sorted(set([d for d, w in test_features]))\n print \"{0} test documents read.\".format(str(len(documents)))\n\n print \"Computing DTM...\"\n test_dtm = DTM(vocabulary, documents, test_features)\n test_dtm = test_dtm.compute_dtm()\n\n print \"Computing cosine values...\"\n dv = {}\n for i in xrange(len(documents)):\n d = 1 - spatial.distance.cosine(benchmark_dtv[benchmark_name], test_dtm[documents[i]])\n if isnan(d):\n d = 0\n dv[documents[i]] = d\n\n this_method = \"method=\" + method[j] + '_' + \"bigram=\" + bigram[j]\n method_csv[this_method] = pd.Series(dv)\n\n print \"Saving to data frame...\"\n df = pd.DataFrame(method_csv)\n test = test.split('\\\\')[-1]\n test = test.split('.')[0]\n df['test_group'] = test\n\n frames.append(df)\n\n result = pd.concat(frames)\n\n print \"Saving results to file: \", output_file\n result.to_csv(output_file)\n\n print 'Finished computing {0} data frames'.format(str(len(test_folders)))",
"def test_list_benchmarks(client):\n # The benchmark listing contains one element (independently of whether the\n # user is logged in or not).\n r = client.get(config.API_PATH() + '/workflows')\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1\n # Create user and the request header that contains the API key for the\n # logged in user.\n _, token = create_user(client, '0000')\n headers = {HEADER_TOKEN: token}\n r = client.get(config.API_PATH() + '/workflows', headers=headers)\n assert r.status_code == 200\n doc = r.json\n assert len(doc[labels.WORKFLOW_LIST]) == 1",
"def get_benchmark_specification(benchmark = 'FSI1'):\n if benchmark == 'FSI1':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 0.2\n T_end = 60.0\n result = \"results-FSI1/\"\n elif benchmark == 'FSI2':\n rho_s = Constant(1e04)\n nu_s = Constant(0.4)\n mu_s = Constant(5e05)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 1.0\n T_end = 15.0\n result = \"results-FSI2/\"\t\t\n elif benchmark == 'FSI3':\n rho_s = Constant(1e03)\n nu_s = Constant(0.4)\n mu_s = Constant(2e06)\n rho_f = Constant(1e03)\n nu_f = Constant(1e-03)\n U = 2.0\n T_end = 20.0\n result = \"results-FSI3/\"\t\t\n else:\n raise ValueError('\"{}\" is a wrong name for problem specification.'.format(benchmark))\n v_max = Constant(1.5*U) # mean velocity to maximum velocity \n # (we have parabolic profile)\n E_s = Constant(2*mu_s*(1+nu_s))\n lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))\n mu_f = Constant(nu_f*rho_f)\n return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result",
"def run_all(self):\n runs = []\n for run in self.benchmarks:\n run.start()\n run.wait()\n runs.append(run.metadata)\n return runs"
] | [
"0.69631344",
"0.64020723",
"0.61482394",
"0.56974643",
"0.5492792",
"0.54700804",
"0.5441572",
"0.5409977",
"0.5309843",
"0.52634937",
"0.5233209",
"0.52182776",
"0.52005297",
"0.5169763",
"0.5148801",
"0.51456755",
"0.51412374",
"0.51203877",
"0.5105856",
"0.5091856",
"0.5067771",
"0.5053111",
"0.5039504",
"0.50090104",
"0.49922386",
"0.4979186",
"0.49667445",
"0.4966193",
"0.49571356",
"0.4929252"
] | 0.801431 | 0 |
Returns the programming language the benchmark was written in. | def get_language(benchmark):
config = benchmark_config.get_config(benchmark)
return config.get('language', 'c++') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def programming_language(self) -> str:\n return self.random.choice(PROGRAMMING_LANGS)",
"def generation_language(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"generation_language\")",
"def code(self):\n return self.language()",
"def language(self):\n return \"QSS\"",
"def getWikiLanguageName():\r\n return \"wikidpad_mini_1_0\"",
"def get_language():\n disabled_modules = ['tokenizer', 'tagger', 'parser', 'textcat']\n nlp = spacy.load('en_core_web_md', disable=disabled_modules)\n # we are not interested in stop-words as most of them are\n # needed in the short sentence examples in relation definitions\n spacy_wmd_hook = wmd.WMD.SpacySimilarityHook(nlp, ignore_stops=False)\n nlp.add_pipe(spacy_wmd_hook, last=True)\n return nlp",
"def language(self, target):\n self._check_target(target)\n return target.language or self._default_language",
"def language(self, max_length=None, **kwargs):\n T = self.with_output()\n return T.language(max_length)",
"def language(self):\n # type: () -> string_types\n return self._language",
"def language(self):\n # type: () -> string_types\n return self._language",
"def audio_language(self):\n # type: () -> string_types\n return self._audio_language",
"def lang_genoeg(lengte):\n return",
"def language(self):\n portal_state = self.context.unrestrictedTraverse(\"@@plone_portal_state\")\n return aq_inner(self.context).Language() or portal_state.default_language()",
"def language():\r\n\r\n cursor.execute('SELECT name from languages order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def identifyLangage(script):\n\tlangage = \"undefined\"\n\tscriptNameInArray = script.split(\".\")\n\textension = scriptNameInArray[-1]\n\t\n\tif(extension == \"pl\"):\n\t\tlangage = \"perl\"\n\telif(extension == \"py\"):\n\t\tlangage = \"python\"\n\telif(extension == \"sh\"):\n\t\tlangage = \"bash\"\n\telse:\n\t\tlangage == \"not recognised\"\n\n\treturn langage",
"def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None",
"def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()",
"def language(self) -> str:\n if self.language_code in CODE_TO_LANGUAGE:\n return CODE_TO_LANGUAGE[self.language_code]\n\n return self.language_code",
"def get_language(self, text):\n try:\n post_lang = detect(text)\n except:\n post_lang = 'N/A'\n return post_lang",
"def language(self):\n hcell = self._get_hcell2()\n celltype = hcell[\"celltype\"]\n if celltype != \"code\":\n raise AttributeError\n return hcell.get(\"language\", \"python\")",
"def srclang(self):\n return self.__srclang",
"def language_code(self) -> str:\n return pulumi.get(self, \"language_code\")",
"def get_lang(self):\n return self.langs.lang",
"def get_language(mgroups):\n\n if mgroups:\n lang = mgroups[0].strip('[').strip(']')\n return lang.lower().strip()\n return None",
"def get_related_language(self) -> str:\n pass",
"def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language",
"def get_language(self) -> str:\n return self.language",
"def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"",
"def language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language\")",
"def get_locale():\n return \"he\""
] | [
"0.72861236",
"0.65404236",
"0.6198768",
"0.61458135",
"0.6069835",
"0.6027784",
"0.59970075",
"0.5983806",
"0.5961204",
"0.5961204",
"0.5915103",
"0.5895517",
"0.5860599",
"0.58391285",
"0.58170784",
"0.57956773",
"0.5790107",
"0.57584816",
"0.57295835",
"0.572077",
"0.57125485",
"0.56796414",
"0.56795347",
"0.56661993",
"0.56655854",
"0.5632322",
"0.5631046",
"0.5628728",
"0.5628638",
"0.5610644"
] | 0.76412416 | 0 |
Returns the list of IDs of tags for the specified model name, matched by (code, name) pair | def get_tag_ids(self, cr, uid, model, code=None, name=None, context=None):
assert bool(code) or bool(name), "code or name must not be None! (code=%s;name=%s)" % (code, name)
tag_domain = [('model_id.model', '=', model)]
if code is not None:
tag_domain.append(('code', '=', code))
if name is not None:
tag_domain.append(('name', '=', name))
return self.search(cr, uid, tag_domain, context=context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _getTagIDs(self):\n paths = self._criteria.get('paths')\n if paths:\n store = getMainStore()\n return list(store.find(Tag.id, Tag.path.is_in(paths)))",
"def get_tag_ids(tag_names):\n\ttag_names = tuple(tag_names)\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"select id from mustard.tags where english_name in %s\", (tag_names,))\n\t\treturn [row[0] for row in cur]",
"def findTaggedServiceIds(self, name):\n pass;",
"def find_usefull_tags(tags, tagmodel, tag_count_vect):\n\n final_tags = []\n for tag in tags:\n if tag == None:\n continue\n else:\n tagpd = pd.Series(tag)\n tag_feature = tag_count_vect.transform(tagpd)\n result = tagmodel.predict(tag_feature)\n\n result = result.tolist() \n result = str(result)\n if result == '[1]':\n final_tags.append(tag)\n final_tags = list(dict.fromkeys(final_tags))\n return(final_tags)",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def dataset_tags(connection):\n assert connection\n query = \"\"\"select * from tags()\"\"\"\n result = sqlio.read_sql_query(query, connection)\n return [item.strip() for item in result['name']], [tag_id.strip() for tag_id in result['tag_id']]",
"def tags():",
"def _model_tags(kwargs, key):\r\n if key not in kwargs:\r\n return []\r\n\r\n instance = kwargs[key]\r\n tags = [\r\n u'{}.{}:{}'.format(key, attr, getattr(instance, attr))\r\n for attr in getattr(instance, 'MODEL_TAGS', [])\r\n ]\r\n tags.append(u'model_class:{}'.format(instance.__class__.__name__))\r\n return tags",
"def get_tags(request):\n as_list = request.params.get('as_list')\n if as_list:\n return [\n tag.name\n for tag in Tag.query.all()\n ]\n else:\n return [\n {\n 'name': tag.name,\n 'id': tag.id\n }\n for tag in Tag.query.all()\n ]",
"def get_tags(self):\r\n\r\n\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET TAGS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT tag\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.tag_dict.keys()",
"def get_tagname(tags, tagid):\n for tag in tags:\n if tag['id'] == tagid:\n return tag['name']",
"def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])",
"def get_tag_names(language_code: str, tag_field: dict):\n tag_names = {}\n tag_names_default = _read_translation_file(\"en\", \"tag_names\")\n tag_names_lang = _read_translation_file(language_code, \"tag_names\")\n\n if tag_field:\n for tag in tag_field[\"tags\"]:\n if tag in tag_names_lang:\n tag_names[tag] = tag_names_lang[tag]\n else:\n tag_names[tag] = tag_names_default[tag]\n\n return tag_names",
"def prepare_tags(self, obj):\n return [tag.name for tag in obj.tags.all()]",
"def tag_mapping(sentences):\n tags = [[word[-1] for word in s] for s in sentences]\n dico = create_dico(tags)\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag",
"def _tags(self):\n retval = []\n for of in self.tagnames:\n retval.append([of, self.get_datatype(of), self.get(of)])\n return retval",
"def get_tag(tag_name, tag_list):\n for i in range(len(tag_list)):\n if tag_name == str(tag_list[i]):\n return tag_list[i]",
"def get_photo_tags(self, photo_id):\n\n query_string = '''\n select photo_tag.tag_name from photo\n join photo_tag on(photo_tag.photo_id=photo.photo_id)\n where photo.photo_id={}\n '''.format(photo_id)\n\n # so an array of tags would be ok\n tag_data = self.db.get_query_as_list(query_string)\n for tag in tag_data:\n # print(self.decode_tag(tag['tag_name']))\n\n tag['human_readable_tag'] = self.decode_tag(tag['tag_name'])\n\n # print(tag_data)\n\n return tag_data",
"def tag_mapping(sentences):\n tags = [[char[-1] for char in s] for s in sentences]\n dico = create_dico(tags)\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag",
"def get_tag_list(tags):\r\n from tagging.models import Tag\r\n if isinstance(tags, Tag):\r\n return [tags]\r\n elif isinstance(tags, QuerySet) and tags.model is Tag:\r\n return tags\r\n elif isinstance(tags, types.StringTypes):\r\n return Tag.objects.filter(name__in=parse_tag_input(tags))\r\n elif isinstance(tags, (types.ListType, types.TupleType)):\r\n if len(tags) == 0:\r\n return tags\r\n contents = set()\r\n for item in tags:\r\n if isinstance(item, types.StringTypes):\r\n contents.add('string')\r\n elif isinstance(item, Tag):\r\n contents.add('tag')\r\n elif isinstance(item, (types.IntType, types.LongType)):\r\n contents.add('int')\r\n if len(contents) == 1:\r\n if 'string' in contents:\r\n return Tag.objects.filter(name__in=[force_unicode(tag) \\\r\n for tag in tags])\r\n elif 'tag' in contents:\r\n return tags\r\n elif 'int' in contents:\r\n return Tag.objects.filter(id__in=tags)\r\n else:\r\n raise ValueError(_('If a list or tuple of tags is provided, they must all be tag names, Tag objects or Tag ids.'))\r\n else:\r\n raise ValueError(_('The tag input given was invalid.'))",
"def getTags(number=None):",
"def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]",
"def get_all_id_and_tags(self):\n return self.database.select(self.tname,\n [self.primary_key, 'tags'])",
"def get_ids(self) -> List[str]:",
"def tag_mapping(data_path, data_type):\n with open(data_path+data_type+\"_labels.txt\", \"r\") as file1:\n tags = [line.split(\" \")[:-1] for line in file1.readlines()]\n dico = create_dico(tags)\n dico[model.START_TAG] = -1\n dico[model.STOP_TAG] = -2\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag",
"def get_tags(self, tags):\n tag_list = []\n for tag in tags:\n tag_list.append(tag[\"name\"])\n return tag_list",
"def get_post_tags(postid, posttags, tags):\n _tags = []\n _nametags = []\n for item in posttags:\n if item['post_id'] == postid:\n _tags.append(item['tag_id'])\n for tag in _tags:\n nametag = get_tagname(tags, tag)\n _nametags.append(nametag)\n return _nametags",
"def list_tags():\r\n tags = Tag.query.order_by(Tag.name).all()\r\n return render_template('tags.html', tags=tags)",
"def get_keys_for_tag(self,tag):\r\n\r\n #using database\r\n if self.using_database:\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n #using shelf\r\n if self.using_shelf:\r\n if self.tag_dict_contains(tag):\r\n return self.tag_dict[tag]\r\n return set()"
] | [
"0.6331528",
"0.61098045",
"0.5848875",
"0.5817225",
"0.57898426",
"0.57898426",
"0.5781574",
"0.5777682",
"0.5743095",
"0.56093746",
"0.5590287",
"0.5589514",
"0.5496177",
"0.54333645",
"0.5430249",
"0.5416349",
"0.54159564",
"0.54110205",
"0.54062647",
"0.53955483",
"0.5384277",
"0.5354498",
"0.5345137",
"0.5321173",
"0.52960914",
"0.5277989",
"0.52756363",
"0.52575725",
"0.52567166",
"0.5253493"
] | 0.8124625 | 0 |
Checks if all of supplied objects have tag with specified code and/or name Return True if all object ids has specified tags | def check_tag(self, cr, uid, ids, code=None, name=None, context=None):
assert bool(code is None) or bool(name is None), "code or name must not be None"
tag_domain = [('id', 'in', ids)]
if code is not None:
tag_domain.append(('tag_ids.code', '=', code))
if name is not None:
tag_domain.append(('tag_ids.name', '=', name))
count = self.search(cr, uid, tag_domain, count=1)
return bool(count == len(ids)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_tag(tags, tagged_events):\n found_tags = set()\n tags_set = set(tags)\n for tag in tags:\n for tag_event in tagged_events:\n if tag in tag_event[1][\"tag\"][\"labels\"]:\n found_tags.add(tag)\n not_found = tags_set - found_tags\n tag_status = {}\n for tag in found_tags:\n tag_status[tag] = True\n for tag in not_found:\n tag_status[tag] = False\n return tag_status",
"def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])",
"def match(self, name, tags):\n return name.lower() in tags",
"def supported(self, tags):\n # type: (List[Any]) -> bool\n return not self.file_tags.isdisjoint(tags)",
"def id_in_list(obj_list, sb_object):\n if __debug__:\n print(\"Checking if sb_object in list...\")\n for sb_objects in obj_list:\n if sb_object.ID == sb_objects.ID:\n if __debug__:\n print(\"Object in list.\")\n return True\n if __debug__:\n print(\"Object not in list\")\n return False",
"def match(self, name, tags):\n S, tags = self.get_compiled(name, tags)\n return bool(S & tags)",
"def match(self, name, tags):\n or_exprs, tags = self.get_compiled(name, tags)\n \n # or_exprs = [{'a'}, {'c'}, {'d', 'a'}, {'d', 'e'}]\n return any(and_expr <= tags for and_expr in or_exprs)",
"def has_all(self, tag, indexes):\n\n return all(self.has(tag, index) for index in indexes)",
"def IsTagExists(self, ResourceId, TagName):\n\n try:\n if self.Service == 'ec2':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 's3':\n response = self.GetBucketTagging(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagSet']])):\n return True\n elif self.Service == 'lambda':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'logs':\n response = self.ListTagsLogGroup(ResourceId)\n if TagName in [x for x in response['tags']]:\n return True\n elif self.Service == 'rds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'es':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'emr':\n response = self.DescribeCluster(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [Tag for Tag in response['Cluster']['Tags']])):\n return True\n elif self.Service == 'dynamodb':\n response = self.ListTagsOfResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'firehose':\n response = self.ListTagsForDeliveryStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'glacier':\n response = self.ListTagsForVault(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'kms':\n response = self.ListResourceTags(ResourceId)\n if TagName in list(map(lambda x: x['TagKey'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'apigateway':\n print('No api to list tags')\n return False\n elif self.Service == 'kinesis':\n response = self.ListTagsForStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudtrail':\n response = self.ListTags(ResourceId)\n TagsList = map(lambda RTL: RTL['TagsList'], [RTL for RTL in response['ResourceTagList']])\n for Tags in TagsList:\n for Tag in Tags:\n if Tag['Key'] == 'Channel':\n return True\n elif self.Service == 'sqs':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'secretsmanager':\n response = self.DescribeSecret(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudfront':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'efs':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'sagemaker':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'redshift':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'elasticache':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif 
self.Service == 'workspaces':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'ds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'dax':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'route53':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'directconnect':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'datapipeline':\n response = self.DescribePipelines(ResourceId)\n Tags = list(map(lambda x: x['tags'], [tags for tags in response['pipelineDescriptionList']]))\n for i in Tags:\n for j in i:\n if j['key'] == 'Channel':\n return True\n else:\n raise TagNotSupportedError(self.Service)\n except Exception as e:\n raise e\n\n return False",
"def objExists(*args, **kwargs)->bool:\n pass",
"def check_for_tags(self, data_in):\n # possible header tags\n tags = ['NODE:', 'PORT:', 'STARTOFFSET:', 'ENDOFFSET:']\n\n # check for tags\n for tag in tags:\n if data_in.find(tag) != -1:\n print \"Found tag %s in data file\" % tag\n return False\n\n return True",
"def ok_tags(tags: dict) -> bool:\n\n if not tags:\n return True\n depth = 0\n queue = [(i, depth+1) for i in tags.values() if isinstance(i, dict)]\n max_depth = 0\n while queue and max_depth < 2:\n sub, depth = queue.pop()\n max_depth = max(max_depth, depth)\n queue = queue + [(i, depth+1) for i in sub.values() if isinstance(i, dict)]\n\n return max_depth < 2 and all(isinstance(k, str) and isinstance(tags[k], str) for k in tags)",
"def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False",
"def check_inner_tag_availability(**kwargs):\n\n inner = kwargs.pop('inner')\n tag = kwargs.pop('tag')\n used_inners = [item.inner for item in [\n *MLPAv4.objects.filter(tag=tag),\n *MLPAv6.objects.filter(tag=tag),\n *BilateralPeer.objects.filter(tag=tag),\n *Monitorv4.objects.filter(tag=tag)]]\n check_inner = inner in used_inners\n if check_inner:\n return False\n else:\n return True",
"def tag_exists(tag, directory=None):\n return tag in get_tags(directory)",
"def __contains__(self, obj):\n\n if isinstance(obj, str):\n return obj in FileStorage.__objects\n return key(type(obj), obj.id) in FileStorage.__objects",
"def has_tag(self, tag):\n return tag in self.tags",
"def has_tag(self, tag):\n return tag in self.tags",
"def tag_dict_contains (self,\r\n tag):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('TAGDICT CONTAINS')\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT rowid \"\r\n +\"FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(tag) in self.tag_dict",
"def containsAll(self, *args):\n pass",
"def hasattrs(obj, names):\n return all(hasattr(obj, attr) for attr in names)",
"def hasattrs(obj, names):\n return all(hasattr(obj, attr) for attr in names)",
"def exists(cls, ko):\n if isinstance(ko, BagDocument):\n return ko._key in cls._dbag\n else:\n return ko in cls._dbag",
"def label_intersects_tags(label, tags):\n for tag in tags:\n if tag in label:\n return True\n return False",
"def has_tag(lst, tag):\n if not isinstance(lst, list):\n lst = [lst]\n for l in lst:\n if l.tag == tag:\n return True\n else:\n return False",
"def assert_tags_present(payload, tags_to_find):\n tags_found = []\n for tag_dict in payload['data']['tags']:\n tags_found.append(tag_dict.get('text'))\n for tag in tags_to_find:\n assert tag in tags_found\n return",
"def contains(self, *args):\n pass",
"def validate(self, obj):\n if 'tags' in obj and not isinstance(obj['tags'], list):\n raise aomi.exceptions.Validation('tags must be a list')\n\n if self.present:\n check_obj(self.required_fields, self.name(), obj)",
"def tag_key_exists(self, key):\n return key in self.map",
"def tag_all_the_things(self, netbox_handler):\n\n for object_type in NetBoxObject.__subclasses__():\n\n for this_object in self.get_all_items(object_type):\n\n # if object was found in source\n if this_object.source is not None:\n this_object.add_tags([netbox_handler.primary_tag, this_object.source.source_tag])\n\n # if object was orphaned remove tag again\n if netbox_handler.orphaned_tag in this_object.get_tags():\n this_object.remove_tags(netbox_handler.orphaned_tag)\n\n # if object was tagged by this program in previous runs but is not present\n # anymore then add the orphaned tag except it originated from a disabled source\n else:\n if bool(set(this_object.get_tags()).intersection(self.source_tags_of_disabled_sources)) is True:\n log.debug2(f\"Object '{this_object.get_display_name()}' was added \"\n f\"from a currently disabled source. Skipping orphaned tagging.\")\n continue\n\n if getattr(this_object, \"prune\", False) is True:\n if netbox_handler.primary_tag in this_object.get_tags():\n this_object.add_tags(netbox_handler.orphaned_tag)\n\n # or just remove primary tag if pruning is disabled\n else:\n if netbox_handler.primary_tag in this_object.get_tags():\n this_object.remove_tags(netbox_handler.primary_tag)\n if netbox_handler.orphaned_tag in this_object.get_tags():\n this_object.remove_tags(netbox_handler.orphaned_tag)"
] | [
"0.61141014",
"0.6103309",
"0.5986072",
"0.5873774",
"0.5779968",
"0.57688856",
"0.57678956",
"0.576175",
"0.5726771",
"0.57225233",
"0.57074004",
"0.563474",
"0.5622186",
"0.5621437",
"0.5620893",
"0.56001145",
"0.55933976",
"0.55933976",
"0.5574528",
"0.5566219",
"0.5503636",
"0.5503636",
"0.5503575",
"0.5502966",
"0.548435",
"0.546435",
"0.54636055",
"0.5444265",
"0.54361796",
"0.5434005"
] | 0.67218775 | 0 |
Take the path to a raw json asset and convert it to target directory. | def processed_json_dir(path):
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def processed_json_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')",
"def copy_json():\n sourcePath = 'contents/external/'\n targetPath = 'build/external/'\n for base,subdirs,files in os.walk(sourcePath):\n for file in files:\n orig = os.path.join(base, file)\n if os.path.isfile(orig) and file[-5:] == '.json':\n targetBase = os.path.join(targetPath, base[len(sourcePath):])\n dest = os.path.join(targetBase, file)\n puts(\"Checking diretory %s\" % targetBase)\n if not os.path.exists(targetBase):\n puts(yellow(\"Not found! Creating...\"))\n os.makedirs(targetBase)\n puts(\"Copying from %s to %s\" % (orig, dest))\n copyfile(orig, dest)",
"def processed_texture_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')",
"def get_source_file_name(json_name):\n assert json_name.endswith(JSON_EXT)\n (directory, base_name) = os.path.split(json_name)\n new_directory = os.path.relpath(directory, start=CACHE)\n new_directory = os.path.join(\"/\", new_directory)\n new_base_name = base_name[:-len(JSON_EXT)]\n i = new_base_name.rfind(\"-\")\n if i != -1:\n new_base_name = new_base_name[:i] + \".\" + new_base_name[i + 1:]\n result = os.path.join(new_directory, new_base_name)\n return result",
"def processed_to_raw_path(self, processed_path):\n # Extract useful information from <path>\n stage, hash_dir, cloud_id = \\\n osp.splitext(processed_path)[0].split('/')[-3:]\n\n # Remove the tiling in the cloud_id, if any\n base_cloud_id = self.id_to_base_id(cloud_id)\n\n # Read the raw cloud data\n raw_ext = osp.splitext(self.raw_file_names_3d[0])[1]\n raw_path = osp.join(self.raw_dir, base_cloud_id + raw_ext)\n\n return raw_path",
"def save_json_file(article_data, dir_path):\n current_date = str(datetime.now().date())\n output_path = os.path.join(dir_path, current_date)\n os.makedirs(output_path, exist_ok=True)\n filepath = os.path.join(output_path, article_data['source'] + '.json')\n if not os.path.isfile(filepath):\n with open(filepath, 'w') as fp:\n json_data = {\n 'results': {\n article_data['id']: article_data\n }\n }\n json.dump(json_data, fp)\n else:\n with open(filepath, 'r') as fp:\n json_data = json.load(fp)\n json_data['results'][article_data['id']] = article_data\n\n with open(filepath, 'w') as fp2:\n json.dump(json_data, fp2)",
"def prep_path(path):\n os.makedirs(os.path.join(path, 'processed'), exist_ok=True)\n os.makedirs(os.path.join(path, 'raw'), exist_ok=True)",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def Sourceify(path):\n return path",
"def process_json(path):\n path = os.path.abspath(os.path.expanduser(path))\n try:\n with open(path) as f:\n return json.load(f, object_hook=ascii_encode_dict)\n except ValueError as e:\n logging.error(\"File: %s\\nInvalid JSON:\\n%s\", str(path), str(e))\n raise\n except IOError as io:\n logging.error(\"Provided json file path does not exist %s\", str(path))\n raise",
"def minify_json(self) -> None:\n print(f\"[FileManager: INFO] Minifing file {self.path}\")\n file_data = open(self.path, \"r\", encoding=\"utf-8\").read()\n json_data = json.loads(file_data) \n json_string = json.dumps(json_data, separators=(',', \":\")) \n path = str(self.path).replace(\".json\", \"\")\n new_path = \"{0}_min.json\".format(path)\n open(new_path, \"w+\", encoding=\"utf-8\").write(json_string)",
"def load_json(path):\n with open(normpath(path), 'r', encoding='utf-8') as file:\n return json.load(file)",
"def from_json(path: str):\n with open(path) as f:\n return json.load(f)",
"def setup_rawpath(job, raw_path):\n\n logging.info(f\"Destination is {raw_path}\")\n if not os.path.exists(raw_path):\n try:\n os.makedirs(raw_path)\n except OSError:\n err = f\"Couldn't create the base file path: {raw_path}. Probably a permissions error\"\n logging.error(err)\n else:\n logging.info(f\"{raw_path} exists. Adding timestamp.\")\n raw_path = os.path.join(str(job.config.RAW_PATH), f\"{job.title}_{job.stage}\")\n logging.info(f\"raw_path is {raw_path}\")\n try:\n os.makedirs(raw_path)\n except OSError:\n err = f\"Couldn't create the base file path: {raw_path}. Probably a permissions error\"\n raise OSError(err) from OSError\n return raw_path",
"def fullpath(data_folder, name):\n return os.path.join(data_folder, f\"{alias(name)}.json\")",
"def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)",
"def from_path(cls, path: str):\n with open(path) as f:\n return json.load(f)",
"def format_data(PATH, download=False):\n # iterate over files in directory\n for file_name in listdir(PATH):\n if file_name[0] == '.': # skip .DS_Store and other hidden files\n continue\n # open each JSON file\n with open(PATH+'/'+file_name) as json_file:\n data = json.load(json_file)\n\n # get image url and download it\n image_name = data['image']['original_filename']\n if download:\n url = data['image']['url']\n download_file_from_url(url, image_name, PATH)\n\n # rename json file to match image name\n hash_name = image_name[:-4]\n rename(PATH+'/'+file_name, PATH+'/'+hash_name+'.json')",
"def write(self, path):\n\n annotation = copy.deepcopy(self.annotation)\n\n for image_info in annotation['images']:\n image_info['file_name'] = os.path.relpath(image_info['file_name'],\n os.path.dirname(path))\n\n with open(path, 'w') as read_file:\n json.dump(annotation, read_file)",
"def load_json(self, unformatted_path: str):\n formatted_path = unformatted_path.format(\n experiment_folder=self.experiment_folder\n )\n if not os.path.isfile(formatted_path):\n self.dump_json(formatted_path, data={})\n with open(formatted_path, \"r\") as infile:\n json_data = json.load(infile)\n return json_data",
"def _save_file(json_response, path, filename):\n if path is not None:\n if path[-1] != \"/\":\n path = path+\"/\"\n filepath = os.path.join(path, filename)\n if not os.path.exists(path):\n os.makedirs(path)\n\n with open(filepath+'.json', 'w') as output_file:\n output_file.write(json_response.text)",
"def asset_forward(path):\n asset = get(f'http://localhost:5001/assets/{path}', timeout=15)\n return Response(\n asset.content,\n mimetype=asset.headers['Content-Type']\n )",
"def json_to_cache(new_json: Dict, file_name: str) -> None:\n\n json_path = os.path.join(CACHE_DIR, file_name)\n ensure_dir(json_path)\n with open(json_path, \"w\") as outfile:\n json.dump(new_json, outfile, ensure_ascii=False)",
"def get_output_json(self, case_path):\r\n if not os.path.exists(case_path):\r\n logging.ERROR('the path of source files does not exist')\r\n else:\r\n self.case_path = os.path.abspath(case_path)\r\n self.case_json = os.path.join(self.case_path, 'output.json')\r\n self.case_image = os.path.join(self.case_path, 'images')\r\n self.num_name = os.path.abspath(self.case_path).split(sep='\\\\')[-2]\r\n self.chi_name = IdToChinese[self.num_name]\r\n\r\n with io.open(self.case_json, 'r', encoding='utf-8') as f:\r\n json_data = json.load(f)\r\n self.audioResult = json_data['data']['audioResult']\r\n self.docs = self.audioResult['docs']\r\n self.classify_four_w= self.audioResult['4W']\r\n self.approval_information = self.audioResult['approval_information']\r\n return True",
"def load_json(path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'r') as json_file:\n return json.load(json_file)",
"def make_cached_json(file_name):\n result = None\n path = clean_path(file_name)\n working_directory = os.path.split(os.path.abspath(file_name))[0]\n file_type_switch = \"-s\" if is_sats_file(file_name) else \"-d\"\n command = []\n command.append(\"patsopt\")\n command.append(\"--jsonize-2\")\n command.append(file_type_switch)\n command.append(path)\n (stdout, _stderr, return_code) = run(\n working_directory,\n command,\n POSTIATS_ENCODING)\n if return_code == 0:\n cached_file_name = get_cached_file_name(path)\n cached_directory = os.path.split(cached_file_name)[0]\n if not os.path.exists(cached_directory):\n os.makedirs(cached_directory)\n output = open(cached_file_name, \"w\")\n output.write(stdout)\n output.close()\n result = json.loads(stdout)\n return result",
"def unformat_file(path: str, out_dir: str):\n\n data_dir = get_data_dir()\n path = Path(path)\n out_dir = Path(out_dir)\n if not path.exists() and out_dir.exists() and out_dir.is_dir():\n return\n\n if path.is_dir():\n path.mkdir(exist_ok=True)\n for filename in path.iterdir():\n unformat_file(filename, str(out_dir))\n\n else:\n dataobj = frontmatter.load(str(path))\n\n try:\n # get relative path of object in `data` dir\n datapath = path.parent.resolve().relative_to(data_dir)\n except ValueError:\n datapath = Path()\n\n # create subdir if doesn't exist\n (out_dir / datapath).mkdir(exist_ok=True)\n new_path = out_dir / datapath / f\"{dataobj.metadata['title']}.md\"\n with new_path.open(\"w\") as f:\n f.write(dataobj.content)\n\n current_app.logger.info(\n f\"Unformatted and moved {str(path)} to {str(new_path.resolve())}\"\n )\n path.unlink()",
"def calc_fullpath(data_folder, name):\n return os.path.join(data_folder, f\"{alias(name)}.json\")",
"def _path(name: str):\n return os.path.join(ASSET_PATH, name)"
] | [
"0.7122769",
"0.6316052",
"0.5610777",
"0.54790395",
"0.52522516",
"0.522991",
"0.5213277",
"0.51795095",
"0.51795095",
"0.51695764",
"0.51168185",
"0.5074967",
"0.5063665",
"0.50393045",
"0.50152886",
"0.4990693",
"0.49906608",
"0.49886268",
"0.49885842",
"0.49729767",
"0.49597967",
"0.49533492",
"0.49173167",
"0.49170667",
"0.49080256",
"0.48977965",
"0.48974547",
"0.48953485",
"0.48904803",
"0.4885386"
] | 0.6894641 | 1 |
Initializes this object's schema, input_files and output_path. | def __init__(self, schema, input_files, output_path):
self.schema = schema
self.input_files = input_files
self.output_path = output_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()",
"def __init__(self):\n\t\tself.parsed = False\n\t\tdir_path = os.path.dirname(os.path.realpath(__file__))\n\t\tself.xsdfilename = os.path.join(dir_path, 'xml', 'schema.xsd')\n\t\tself.schema = 'schema.xsd'\n\t\tself.predictors = []\n\t\tself.predictors_types = []\n\t\tself.preprocessing_methods = []",
"def __init__(\n self,\n data_path: str,\n output_path: str\n ):\n\n self.data_path = data_path\n self.output_path = output_path",
"def _set_up(self):\n # Also recreates any attributes not in _io_attrs on load from input file.\n # See IOAble class docstring for more info.\n self._enforce_symmetry()\n self._sort_modes()\n self._create_idx()",
"def __init__(self, reader_schema, writer_schema=None, input_file=None):\n\n if writer_schema is None:\n writer_schema = reader_schema\n self._reader_schema = reader_schema\n self._writer_schema = writer_schema\n self._reader_schema_json = json.loads(str(self._reader_schema))\n self._writer_schema_json = json.loads(str(self._writer_schema))\n self._input_file = input_file\n self._set_avro_readers()",
"def __init__(self, inputmanager=None, outputpath=default.output_path):\n self.__inputmanager = inputmanager\n self.__outputpath = outputpath",
"def __init__(self, data=None, filename=None, schema=None):\n self.data = None\n self.schema = None\n self.filename = None\n if schema:\n self.load_schema(schema)\n if filename:\n self.load_file(filename)\n if data:\n self.load_data(data)",
"def init(self):\n\n self.checkDirectory(self.output_dir,\"output\")\n self.checkDirectory(self.working_dir,\"working\")",
"def __init__(self, input_directory, save_directory):\n self.input_directory = input_directory\n self.save_directory = save_directory\n self.__concatonate_files_controller()",
"def __init__(self, db_location, input_location = None,\n force_regenerate_input = False):\n \n if input_location == None:\n input_location = str(Path(db_location, 'rf_input_data'))\n \n # Check if at least gauge.parquet, refer_x0y0.parquet and radar_x0y0.parquet\n # are present\n valid = True\n if not os.path.exists(input_location):\n valid = False\n os.makedirs(input_location)\n files = glob.glob(str(Path(input_location, '*')))\n files = [os.path.basename(f) for f in files]\n if ('gauge.parquet' not in files or 'reference_x0y0.parquet' not in files\n or 'radar_x0y0.parquet' not in files):\n valid = False\n \n self.input_location = input_location\n self.db_location = db_location\n \n if not valid :\n logging.info('Could not find valid input data from the folder {:s}'.format(input_location))\n if force_regenerate_input or not valid:\n logging.info('The program will now compute this input data from the database, this takes quite some time')\n self.prepare_input()",
"def __init__(self):\n INSTALL_DIR = dirname(__file__)\n CONFIG_DIR = '/etc/Model2WADL/'\n logging.basicConfig(level=logging.ERROR)\n logging.config.fileConfig([join(CONFIG_DIR, 'logging.conf'), expanduser('~/.logging.conf'), 'logging.conf'])\n self.__log = logging.getLogger('thesis')\n\n self.__log.debug(\"Reading general configuration from Model2WADL.cfg\")\n self.__m2wConfig = ConfigParser.SafeConfigParser()\n self.__m2wConfig.read(\n [join(CONFIG_DIR, 'Physical2Virtual.cfg'), expanduser('~/.Physical2Virtual.cfg'), 'Physical2Virtual.cfg'])\n\n self.__baseURI = self.__m2wConfig.get(\"Config\", \"baseURI\")\n self.__basePackage = self.__m2wConfig.get(\"Config\", \"basePackage\")\n self.__schemaFile = self.__m2wConfig.get(\"Config\", \"schemaFile\")\n self.__model = None\n self.__input = None\n self.__output = None",
"def __init__(self):\n\n self.root_path = os.path.dirname(os.path.abspath(__file__))[:-5]\n self.config_path = os.path.join(self.root_path, \"files\\\\CONFIG.txt\")\n self.metrics_path = os.path.join(self.root_path, \"files\\\\metrics.txt\")\n\n self.setup_metrics_file()\n\n if self.check_configuration() is False:\n self.setup_configuration_file()",
"def __init__(self, schema_name, schema_path):\n self.__schema_name = schema_name\n self.__schema_path = schema_path\n\n self.__test_set_list = [] # Creates an empty list of test sets\n\n self.__validate_and_compile()",
"def __init__(self):\n\n self.current_path = os.getcwd()\n self.data_path = self.current_path + \"/data\"\n\n self.original_files = {}\n self.imitation_files = {}\n self.original_test_files = {}\n self.imitation_test_files = {}\n\n self.training_set = None\n self.original_test_set = None\n self.imitation_test_set = None\n\n self.accuracy = 0.\n self.threshold = 0.\n\n self.get_files()",
"def initialize_options(self):\n self.input_dir = getcwd()\n self.output_dir = path.join(getcwd(), 'dependency', 'static', 'apidocs')",
"def __init__(self, data, schema_file=None):\n if schema_file is not None:\n if not os.path.exists(schema_file):\n logging.warning(\n 'Configuration schema file could not be found. Please check the path and try again.'\n )\n raise Exception()\n\n self.schema_file = schema_file\n else:\n self.schema_file = None\n\n if not os.path.exists(data):\n logging.warning(\n 'Configuration data could not be found. Please check the path and try again.'\n )\n raise Exception()\n\n self.datafile = data\n\n self.data = None",
"def init(self) -> None:\n self.faithful = self.is_faithful()\n if not self.faithful:\n # We retrieve a list of each element into the directory_path\n files_into_directory = list_files_into_directory(self.directory_path)\n if files_into_directory.__len__() == 0:\n self.create_required_structure()\n self.faithful = True\n else:\n raise AttributeError(\"The directory seems improper but not empty\")\n self.class_names = self.find_class_names()\n self.training_image_names = self.find_training_image_names()\n self.extracted_features = self.find_features_already_extracted()\n if len(self.extracted_features) != 0:\n self.selected_features = set(self.extracted_features)\n self.generate_csv_dictionary()",
"def __init__(self, **kwargs):\n cls = self.__class__\n\n # Initialize all configurables and input arguments\n for arg in cls.configurables():\n try: # Read from class constructor\n setattr(self, arg, kwargs[arg])\n except KeyError:\n try: # Set from default value defined in class\n default_value = getattr(self, arg).kwargs[\"default\"]\n setattr(self, arg, default_value)\n except KeyError: # if nothing is provided, fallbakcs to None\n setattr(self, arg, None)\n\n self.input_arguments = None\n if cls.input_configurables():\n self.input_arguments = [\n getattr(self, arg) for arg in cls.input_configurables()\n ]\n\n self.json_config = cfg.JsonConfig(self.config)\n self.output_objects = []\n self.file = None",
"def __init__(self, output_dir):\n self.output_dir = os.path.abspath(output_dir)\n # Create the file if it doesn't already exist\n os.makedirs(self.output_dir, exist_ok=True)\n self.f = None\n self.data = None\n self.L = None",
"def __init__(self, input_files):\n\n self._input_files = input_files\n self._cached_workspace_names = {} # {ID: Name}",
"def __init__(self, synapse_df):\n self._file_df = synapse_df[synapse_df.type == \"file\"]\n self._folder_df = synapse_df[synapse_df.type == \"folder\"]\n self._init_file_types()\n self._identify_archive_folders()\n self._walk_files()",
"def setUp(self, path, structure_file, input_file):\n database.clean()\n self.path = path\n self.structure_file = path + structure_file\n self.input_file = path + input_file\n\n string_processor.project = Project()\n\n self.input_project = Project()\n self.input_project.document_files.append(\n DocumentFile(path=self.input_file))\n self.input_project.save()\n\n with open(self.structure_file) as f:\n self.json = json.load(f)\n\n self.xml = etree.parse(self.input_file)\n self.extractor = StructureExtractor(string_processor,\n self.structure_file)",
"def __init__(self,\n output_type=None,\n gzip_filehandle=None,\n gzip_filehandle_parent=None,\n schema=None):\n self.output_type = output_type\n self.gzip_filehandle = gzip_filehandle\n self.schema = schema\n self.gzip_filehandle_parent = gzip_filehandle_parent",
"def __init__(self, \n save_data_folder: str,\n reader:FileReader = None,\n input_file:str = None,\n *args, **kwargs):\n \n if reader:\n self.files, self.attr_names = reader.read_file(input_file, *args, **kwargs)\n \n self.save_data_folder = Path(save_data_folder)\n self.save_data_folder.mkdir(parents=True, exist_ok=True)\n BaseProcess.set_logger('generator.log')",
"def __init__(self, path, input_type='f'):\n if input_type == 'f':\n file = open(path, 'r')\n elif input_type == 's':\n file = path\n else:\n raise exceptions.BadInputError(f\"invalid input type {input_type}\")\n\n pdl = yaml.safe_load(file)\n\n self.type_checks = {\n 'typedef': self.validate_typedef,\n 'component': self.validate_component,\n 'graph': self.validate_graph,\n }\n\n self.imports = []\n if 'import' in pdl:\n self.imports = pdl['import']\n\n self.namespace = pdl['name']\n self.body = pdl['body']\n self.typedefs = {}\n self.components = []\n self.graphs = []\n self.validate()",
"def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()",
"def __init__(self, targetDir, model):\n \n self.categoryFolder = targetDir\n self.model = model\n self.inputsFolder = os.path.join(targetDir, \"Inputs\")",
"def setup_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()\n if not os.path.exists(self.plaintext_directory):\n os.makedirs(self.plaintext_directory)\n\n if not os.path.exists(self.training_path):\n os.makedirs(self.training_path)\n\n if not os.path.exists(self.heldout_path):\n os.makedirs(self.heldout_path)\n\n prepare_data(self.paths)",
"def __init__(self, input_filename='input.txt', output_filename='output.txt'):\n self._input = input_filename\n self._output = output_filename\n self._fin = open(self._input, 'r')\n self._fout = open(self._output, 'w')",
"def init(self):\n\t\tfrom splat_to_db import splat_to_db\n\t\tfrom visualize.clustering_test import clustering_test\n\t\tfrom codense.codense2db import codense2db\n\t\tself.splat_to_db_instance = splat_to_db()\n\t\tself.clustering_test_instance = clustering_test()\n\t\tself.codense2db_instance = codense2db()\n\t\t\n\t\tif not os.path.isdir(self.dir_files):\n\t\t\tos.makedirs(self.dir_files)\n\t\telse:\n\t\t\tsys.stderr.write(\"Warning, directory %s already exists.\\n\"%(self.dir_files))\n\t\tself.tmpinfname = os.path.join(self.dir_files, 'input')\n\t\tself.tmpoutfname = os.path.join(self.dir_files, 'output')\n\t\t\n\t\tself.crack_dict = {1: crack_by_modes(self.debug),\n\t\t\t2:crack_by_splat(self.debug)}\n\t\tself.argument1_dict = {1: self.clustering_test_instance,\n\t\t\t2: self.splat_to_db_instance}\n\t\t\n\t\t#two descending tables\n\t\tself.splat_table = '%ss'%self.table\n\t\tself.mcl_table = self.splat_table.replace('splat','mcl')\n\t\tif self.mcl_table == self.splat_table:\n\t\t\tsys.stderr.write(\"Error: new splat and mcl tables have the same name, %s\\n\"%self.splat_table)\n\t\t\tsys.exit(2)"
] | [
"0.69186145",
"0.6781864",
"0.6756163",
"0.6735031",
"0.6637486",
"0.6575436",
"0.6545899",
"0.65405905",
"0.6535554",
"0.65054566",
"0.6497379",
"0.6446711",
"0.6439875",
"0.64162254",
"0.6388665",
"0.63697845",
"0.63621044",
"0.63425386",
"0.6337463",
"0.6333185",
"0.6330976",
"0.6310273",
"0.6302738",
"0.62996733",
"0.62843364",
"0.62698",
"0.62557936",
"0.6251302",
"0.622112",
"0.6208984"
] | 0.8291974 | 0 |
Take the path to a raw png asset and convert it to target webp path. | def processed_texture_path(path):
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def img2webp(path):\n file, ext = os.path.splitext(path)\n image = Image.open(path).convert(\"RGBA\")\n image = ImageOps.expand(image, 75)\n image.save(file + \".webp\", \"WEBP\")\n os.remove(path)",
"def image_webp():\n data = resource(\"images/wolf_1.webp\")\n return Response(data, headers={\"Content-Type\": \"image/webp\"})",
"def convert_png_image_to_webp(png, out, quality=80):\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)",
"def generate_webp_textures():\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)",
"def make_image(self, path):\n\t\treturn None",
"def _webp(self, tile: bytes) -> np.ndarray:\n decoded = np.rollaxis(imagecodecs.webp_decode(tile), 2, 0)\n return decoded",
"def test_write_lossless_rgb(tmp_path):\n\n temp_file = str(tmp_path / \"temp.webp\")\n # temp_file = \"temp.webp\"\n\n pil_image = hopper(\"RGBA\")\n\n mask = Image.new(\"RGBA\", (64, 64), (128, 128, 128, 128))\n # Add some partially transparent bits:\n pil_image.paste(mask, (0, 0), mask)\n\n pil_image.save(temp_file, lossless=True)\n\n with Image.open(temp_file) as image:\n image.load()\n\n assert image.mode == \"RGBA\"\n assert image.size == pil_image.size\n assert image.format == \"WEBP\"\n image.load()\n image.getdata()\n\n assert_image_equal(image, pil_image)",
"def handle_as_url(view: View, point: int, string: str, name: str):\n\n # Let's assume this url as input:\n # (https://upload.wikimedia.org/wikipedia/commons/8/84/Example.svg)\n\n # Download the image\n # FIXME: avoid nested try-except clauses\n try:\n try:\n f = urlopen(unquote(string)) # <==\n except:\n try:\n url_path = quote(string).replace(\"%3A\", ':', 1)\n f = urlopen(url_path)\n except:\n f = urlopen(string)\n # don't fill the console with stack-trace when there`s no connection !!\n except Exception as e:\n print(e)\n return\n\n # file needs conversion ?\n need_conversion = name.endswith(FORMAT_TO_CONVERT) # => True\n basename, ext = osp.splitext(name) # => (\"Example\", \".svg\")\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR,\n \"tmp_image\" + (ext if need_conversion else \".png\")\n ) # => \"TEMP_DIR/tmp_image.svg\"\n\n # Save downloaded data in the temporary file\n content = f.read()\n with open(tmp_file, \"wb\") as dst:\n dst.write(content)\n\n # if the file needs conversion, convert it then read data from the resulting png\n if need_conversion:\n # keep the image's temporary file and name for later use\n conv_file = tmp_file # => \"TEMP_DIR/tmp_image.svg\"\n conv_name = name # => \"Example.svg\"\n\n # => \"TEMP_DIR/tmp_image.png\"\n png = osp.splitext(tmp_file)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(tmp_file, png)\n\n # set temp_file and name to the png file\n tmp_file = png # => \"TEMP_DIR/tmp_image.png\"\n name = basename + \".png\" # => \"Example.png\"\n\n # read data from the resulting png\n with open(tmp_file, \"rb\") as dst:\n content = dst.read()\n\n real_width, real_height, size = get_image_size(tmp_file)\n width, height = get_dimensions(view, tmp_file)\n encoded = str(base64.b64encode(content), \"utf-8\")\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, conv_name, \"url\")\n else:\n save(tmp_file, name, \"url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"url\", conv_name)\n else:\n convert(tmp_file, \"url\", name)\n else:\n sublime.active_window().open_file(tmp_file)\n\n view.show_popup(\n TEMPLATE % (width, height, \"png\", encoded, real_width, real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )",
"def save_image(data, file_path):\n with open(file_path, 'wb'):\n prefix = 'data:image/webp;base64,'\n data = data[len(prefix):]\n byte_data = base64.b64decode(data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(file_path)\n return True",
"def handle_as_url(view: sublime.View, point: int, string: str, name: str):\n\n # Let's assume this url as input:\n # (https://upload.wikimedia.org/wikipedia/commons/8/84/Example.svg)\n\n # Download the image\n # FIXME: avoid nested try-except clauses\n try:\n try:\n f = urlopen(unquote(string)) # <==\n except Exception:\n try:\n url_path = quote(string).replace(\"%3A\", ':', 1)\n f = urlopen(url_path)\n except Exception:\n f = urlopen(string)\n # don't fill the console with stack-trace when there`s no connection !!\n except Exception as e:\n print(e)\n return\n\n # file needs conversion ?\n need_conversion = name.endswith(formats_to_convert) # => True\n basename, ext = osp.splitext(name) # => (\"Example\", \".svg\")\n # create a temporary file\n temp_img = osp.join(TEMP_DIR, \"tmp_image\" + ext) # => \"TEMP_DIR/tmp_image.svg\"\n\n # Save downloaded data in the temporary file\n content = f.read()\n with open(temp_img, \"wb\") as img:\n img.write(content)\n\n # if the file needs conversion, convert it then read data from the resulting png\n if need_conversion:\n ext = \".png\"\n # keep the image's temporary file and name for later use\n conv_file = temp_img # => \"TEMP_DIR/tmp_image.svg\"\n\n # => \"TEMP_DIR/tmp_image.png\"\n temp_png = osp.splitext(temp_img)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(temp_img, temp_png)\n\n # read data from the resulting png\n with open(temp_png, \"rb\") as png:\n content = png.read()\n\n # set temp_file and name to the png file\n temp_img = temp_png # => \"TEMP_DIR/tmp_image.png\"\n\n width, height, real_width, real_height, size = get_data(view, temp_img)\n encoded = str(base64.b64encode(content), \"utf-8\")\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, name, \"url\")\n else:\n save(temp_img, name, \"url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"url\", name)\n else:\n convert(temp_img, \"url\", name)\n else:\n sublime.active_window().open_file(temp_img)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height,\n str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )",
"def clean_webp_textures():\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)",
"def toPng(self):\n\t\tif self.isPng:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn textureFile( self.path.replace( self.extension, '.png' ) )",
"def get_png_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES, \"img\", \"png\"\n )",
"def _copy_to_media(self, template_name, source=''):\n dirpath = os.path.join(self.cache_root, os.path.dirname(template_name))\n filename = os.path.basename(template_name)\n fullpath = os.path.join(dirpath, filename)\n\n if not os.path.isfile(fullpath) or settings.DEBUG:\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n f = open(fullpath, 'w')\n f.write(source)\n f.close()\n\n return urljoin(self.cache_url, template_name), filename",
"def test_write_unsupported_mode_PA(tmp_path):\n\n temp_file = str(tmp_path / \"temp.webp\")\n file_path = \"Tests/images/transparent.gif\"\n with Image.open(file_path) as im:\n im.save(temp_file)\n with Image.open(temp_file) as image:\n assert image.mode == \"RGBA\"\n assert image.size == (200, 150)\n assert image.format == \"WEBP\"\n\n image.load()\n image.getdata()\n with Image.open(file_path) as im:\n target = im.convert(\"RGBA\")\n\n assert_image_similar(image, target, 25.0)",
"def make_image_path(raw_img, input_base, base_path):\n path = os.path.dirname(raw_img)\n relpath = os.path.relpath(path, input_base)\n if relpath == '.':\n dest_folder = base_path\n else:\n dest_folder = os.path.join(base_path, relpath)\n return os.path.normpath(dest_folder)\n # return dest_folder",
"def build_image_path(self, src):\r\n o = urlparse(src)\r\n # we have a full url\r\n if o.hostname:\r\n return o.geturl()\r\n # we have a relative url\r\n return urljoin(self.target_url, src)",
"def copy_png(\n inpath: str, outpath: str, filter_chunks: Optional[Callable[[bytes], bool]] = None, verify_crc: bool = False\n) -> None:\n\n with open(inpath, \"rb\") as fr, open(outpath, \"xb\") as fw:\n copy_png_fp(fr, fw, filter_chunks=filter_chunks, verify_crc=verify_crc)",
"def absolute_asset_url(module, path):\n return absolute_uri(get_asset_url(module, path))",
"def get_image_qm(html_src, todir):\n #print url\n\n img_url, title = img_details(html_src)\n \n r = requests.get(img_url)\n with open(todir+title+'.jpg','wb') as f:\n f.write(r.content)",
"def test_write_rgba(tmp_path):\n\n temp_file = str(tmp_path / \"temp.webp\")\n\n pil_image = Image.new(\"RGBA\", (10, 10), (255, 0, 0, 20))\n pil_image.save(temp_file)\n\n if _webp.WebPDecoderBuggyAlpha():\n return\n\n with Image.open(temp_file) as image:\n image.load()\n\n assert image.mode == \"RGBA\"\n assert image.size == (10, 10)\n assert image.format == \"WEBP\"\n image.load()\n image.getdata()\n\n # Early versions of WebP are known to produce higher deviations:\n # deal with it\n if _webp.WebPDecoderVersion() <= 0x201:\n assert_image_similar(image, pil_image, 3.0)\n else:\n assert_image_similar(image, pil_image, 1.0)",
"def imgOutputPath(self, newpath):\n newimg = self.imagePath().replace(self.home, newpath)\n return newimg",
"def test_encode_webp():\n width = 51\n height = 26\n channels = 3\n bmp_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_image\", \"lena.bmp\")\n with open(bmp_file, 'rb') as f:\n bmp_contents = f.read()\n image_v = tf.image.decode_bmp(bmp_contents)\n assert image_v.shape == [height, width, channels]\n bmp_encoded = image_io.encode_bmp(image_v)\n image_e = tf.image.decode_bmp(bmp_encoded)\n assert np.all(image_v.numpy() == image_e.numpy())",
"def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()",
"def read_webp_image(h5_dataset):\n data = h5_dataset[:].tobytes()\n img_bytesio = BytesIO(data)\n pil_img = Image.open(img_bytesio, 'r')\n return pil_img",
"def custom_static_serve(*args, **keywords):\n response = serve(*args, **keywords)\n if keywords[\"path\"].endswith(\".webp\"):\n response.headers[\"Content-Type\"] = \"image/webp\"\n return response",
"def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img",
"def convert_to_high_res(url):\n return add_string_to_image_url(url, 'high-res')",
"def source(request, filepath_pseudos):\n filepath_pseudo = pathlib.Path(filepath_pseudos()) / 'Ar.upf'\n\n if request.param is str:\n return str(filepath_pseudo)\n\n if request.param is pathlib.Path:\n return filepath_pseudo\n\n return io.BytesIO(filepath_pseudo.read_bytes())",
"def __make_png(self, abspath_img_rgb):\n if not os.path.exists(DIR_PNG):\n os.makedirs(DIR_PNG)\n\n outsize = '{}%'.format(OUTSIZE_RGB)\n img_name_rgb = os.path.basename(abspath_img_rgb)\n suffix_extension_tif = Utils.get_suffix_tif(img_name_rgb)\n img_png = img_name_rgb.replace(suffix_extension_tif, '.png')\n path_img_png = os.path.join(DIR_PNG, img_png)\n\n command = \"gdal_translate -ot byte -of PNG -outsize {} {} \" \\\n \"-a_nodata 0 -q {} {}\".format(\n outsize, outsize, abspath_img_rgb, path_img_png\n )\n os.system(command)\n return os.path.join(DIR_PNG_TO_DB, img_png)"
] | [
"0.66254675",
"0.6108834",
"0.59269124",
"0.57612014",
"0.54063636",
"0.5362226",
"0.5322664",
"0.53141046",
"0.5290474",
"0.5259612",
"0.520573",
"0.51810825",
"0.51549965",
"0.51190436",
"0.5110097",
"0.51075953",
"0.50395477",
"0.5022422",
"0.48821872",
"0.48676977",
"0.4838838",
"0.48383144",
"0.48332033",
"0.4826889",
"0.48095867",
"0.48065874",
"0.4792933",
"0.47814026",
"0.47511697",
"0.47389063"
] | 0.72864443 | 0 |
Run the flatbuffer compiler on the given json file and schema. | def convert_json_to_flatbuffer_binary(json, schema, out_dir):
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(\n self,\n input_file=sys.stdin,\n output_file=sys.stdout,\n schema_map=None,\n ):\n schema_map, error_logs = self.deduce_schema(\n input_file, schema_map=schema_map\n )\n\n for error in error_logs:\n logging.info(\n f\"Problem on line {error['line_number']}: {error['msg']}\"\n )\n\n if self.debugging_map:\n json.dump(schema_map, output_file, indent=2)\n print(file=output_file)\n else:\n schema = self.flatten_schema(schema_map)\n json.dump(schema, output_file, indent=2)\n print(file=output_file)",
"def main(input_json, output_markdown):\n parser = Parser()\n output_md = parser.parse_schema(json.load(input_json))\n output_markdown.writelines(output_md)\n click.secho(\"✔ Successfully parsed schema!\", bold=True, fg=\"green\")",
"def main(schema_file, cxx=None, py=None):\n logging.info('Working on file: ' + schema_file)\n\n # validate file exists\n if not os.path.isfile(schema_file):\n logging.info('File not found')\n return -1\n\n # loading schema from file\n json_parser = JsonComment(json)\n with open(schema_file) as fd:\n schema = json_parser.load(fd)\n\n # validating the schema\n status, msg = validate_protocol_schema(schema)\n if not status:\n logging.info('Invalid Schema:' + msg)\n return -2\n logging.info('Schema is valid')\n\n # Build the protocol\n protocol = Protocol(schema)\n\n # build CXX\n if cxx is not None:\n if not os.path.isdir(cxx):\n logging.info('CXX Dest not found')\n return -1\n\n message.make_message_cxx(protocol, cxx)\n\n for r in protocol.endpoints:\n router.make_router_cxx(protocol, r, cxx)\n\n # build py\n if py is not None:\n if not os.path.isdir(py):\n logging.info('PY Dest not found')\n return -1\n\n message.make_message_py(protocol, py)\n\n for r in protocol.endpoints:\n router.make_router_py(protocol, r, py)\n\n return 0",
"def test_json():\n schemas = {\n 'schema-languages': 'bible/languages.json',\n 'schema-book-metadata': 'bible/book-metadata.json',\n 'schema-bible': 'bible/bible-*.json'\n }\n for schema_name, data_path_glob in schemas.items():\n schema_path = 'schemas/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_glob)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n yield jsonschema.validate, data, schema",
"def generate_flatbuffer_binaries():\n for element in FLATBUFFERS_CONVERSION_DATA:\n schema = element.schema\n output_path = element.output_path\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n for json in element.input_files:\n target = processed_json_path(json)\n if needs_rebuild(json, target) or needs_rebuild(schema, target):\n convert_json_to_flatbuffer_binary(\n json, schema, output_path)",
"def main(gtfs_file, input_json_file):\n\n with open(input_json_file) as jsonfile:\n input_json = json.load(jsonfile)\n\n gtfs_feed = mzgtfs.feed.Feed(filename=gtfs_file)\n\n for fare_id, rules_attributes in input_json.iteritems():\n add_fare_id(gtfs_feed, fare_id, rules_attributes)\n\n files = ['fare_attributes.txt', 'fare_rules.txt']\n gtfs_feed.write('fare_attributes.txt', gtfs_feed.fares())\n gtfs_feed.write('fare_rules.txt', gtfs_feed.fare_rules())\n\n gtfs_feed.make_zip('output.zip', files=files, clone=gtfs_file)\n shutil.move('output.zip', gtfs_file)\n\n util.delete_temp_files(files)",
"def main():\n\n json_str = sys.stdin.read()\n\n try:\n object_json = json.loads(json_str)\n\n except json.JSONDecodeError:\n print(\"Error with the JSON input. Please check your JSON file.\")\n sys.exit(1)\n\n final_object = flatten_json(object_json, prefix=\"\")\n object_json_output = json.dumps(final_object, indent=4)\n\n print(object_json_output)",
"def main(db_path, schema_json):\n create_db(db_path, schema_json)",
"def main():\n\n # Load arguments\n args = get_args()\n \n assert os.path.exists(args.csv), ' [ERR] File' + os.path.exists(args.csv) +'does not exist'\n\n print(args)\n try:\n dir_name = os.path.dirname(args.json)\n os.mkdir(dir_name)\n print(' [INFO] Creating', dir_name, 'directory')\n except:\n print(' [INFO] Directory', dir_name, 'already exists. Data will be replaced')\n pass\n\n if args.config:\n assert os.path.exists(args.config), ' [ERR] File' + os.path.exists(args.config) +'does not exist'\n dic_types = read_config(args.config)\n else:\n dic_types = {}\n \n # Create json\n create_json_from_csv(args.csv, args.delimiter, args.cols_delimiter, args.keep, dic_types, args.infer_types, args.max_docs, args.json, args.per_line)\n\n return 0",
"def main(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n\n course_dict = {}\n course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])\n course_dict['blocks'] = build_course_map(data)\n\n filename = '%s' % course_dict['course_id']\n filepath = os.path.join('../input/', filename)\n\n with open(filepath, 'w') as outfile:\n json.dump(course_dict, outfile, indent=4)",
"def _load_schema(self, json_schema):\n # use jsonrefs to resolve all $refs in json\n data = jsonref.loads(json.dumps(json_schema))\n return self.__initialise_template(data)",
"def __json_schema_generator(file):\n try:\n data = json.load(file)\n metadata_set = set()\n try:\n for datum in data['meta']['view']['columns']:\n metadata_set.add(datum['name'])\n except Exception as e:\n metadata_set.clear()\n for datum in data:\n if isinstance(datum, str):\n metadata_set.add(datum)\n else:\n for datum_property in datum:\n metadata_set.add(str(datum_property))\n\n metadata_list = list(metadata_set)\n # assumes list of objects with sparsse data\n # OR\n # for data_property in data[0]:\n # metadata_list.append(data_property)\n # assumes list of objects and that first entry has full list of properties\n\n return SchemaGenerator.__build_schema(metadata_list)\n except Exception as e:\n logging.error('Failed to parse json file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from json file.\")",
"def run_json(filepath: str, strict: bool = False, debug: bool = False):\n with open(filepath) as jse:\n return _run_file_full(filepath, json.load(jse), strict=strict, debug=debug)",
"def _convert(self, fn, suffix='json', path='jsonschema', name=None,\n root_class_name=None, data_files=[], target_class=None):\n ie = JsonSchemaImportEngine()\n d = os.path.join(INPUT_DIR, path)\n schema = ie.load(os.path.join(d, f'{fn}.{suffix}'), name=name, format=suffix, root_class_name=root_class_name)\n model_path = os.path.join(OUTPUT_DIR, f'{fn}.yaml')\n write_schema(schema, model_path)\n roundtrip_path = os.path.join(OUTPUT_DIR, f'{fn}.roundtrip.json')\n with open(roundtrip_path, 'w') as stream:\n stream.write(JsonSchemaGenerator(model_path).serialize())\n python_path = os.path.join(OUTPUT_DIR, f'{fn}.py')\n with open(python_path, 'w') as stream:\n stream.write(PythonGenerator(model_path).serialize())\n compile_python(python_path)\n # TODO: test data_files\n return schema",
"def main():\n\n handler = PlanetSourceHandler()\n\n parser = xml.sax.make_parser()\n parser.setFeature(xml.sax.handler.feature_namespaces, 1)\n parser.setContentHandler(handler)\n parser.parse(sys.stdin)\n\n print simplejson.JSONEncoder(indent=True).encode(handler.sources)",
"def main():\n args = parse_arguments()\n\n with open(args.input_file) as infile:\n data = json.load(infile)\n if \"expected_format\" in data and \"compatible_fragment_ratio\" in data:\n send_message(save(\"strandedness\", data[\"expected_format\"]))\n send_message(\n save(\"fragment_ratio\", str(round(data[\"compatible_fragment_ratio\"], 2)))\n )\n else:\n send_message(error(\"Cannot parse library type information file.\"))",
"def main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n usage_ = \"\"\"%prog [OPTIONS] JsonFile [MoreJsonFiles...]\nFormat/Beautify one or more JSON file(s).\"\"\"\n parser = OptionParser(usage=usage_, version=VERSION)\n parser.add_option(\"-i\", \"--indent\", dest=\"indent_size\",\n default=DEFAULT_INDENT_SIZE, type=\"int\",\n help=\"Indent size to use (default: %default).\")\n parser.add_option(\"-c\", \"--compact\", dest=\"compact\",\n action=\"store_true\", default=False,\n help=\"Use compact format (default: %default).\")\n parser.add_option(\"-n\", \"--dry-run\", dest=\"dry_run\",\n action=\"store_true\", default=False,\n help=\"Check only if JSON is well-formed (default: %default).\")\n options, filenames = parser.parse_args(args) #< pylint: disable=W0612\n if not filenames:\n parser.error(\"OOPS, no filenames provided.\")\n if options.compact:\n options.indent_size = None\n\n # -- STEP: Init logging subsystem.\n format_ = \"json.format: %(message)s\"\n logging.basicConfig(level=logging.WARN, format=format_)\n console = logging.getLogger(\"console\")\n\n # -- DOS-SHELL SUPPORT: Perform filename globbing w/ wildcards.\n skipped = 0\n filenames2 = []\n for filename in filenames:\n if \"*\" in filenames:\n files = glob.glob(filename)\n filenames2.extend(files)\n elif os.path.isdir(filename):\n # -- CONVENIENCE-SHORTCUT: Use DIR as shortcut for JSON files.\n files = glob.glob(os.path.join(filename, \"*.json\"))\n filenames2.extend(files)\n if not files:\n console.info(\"SKIP %s, no JSON files found in dir.\", filename)\n skipped += 1\n elif not os.path.exists(filename):\n console.warning(\"SKIP %s, file not found.\", filename)\n skipped += 1\n continue\n else:\n assert os.path.exists(filename)\n filenames2.append(filename)\n filenames = filenames2\n\n # -- NORMAL PROCESSING:\n errors = json_formatall(filenames, options.indent_size,\n dry_run=options.dry_run)\n console.error(\"Processed %d files (%d with errors, skipped=%d).\",\n len(filenames), errors, skipped)\n if not filenames:\n errors += 1\n return errors",
"def load_json_schema(filename):\n relative_path = join('../schema', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(\n schema_file.read(), base_uri=base_uri, jsonschema=True)",
"def main() -> None:\n # The first thing to do is get the lines of the PyFlex file we are given.\n parser = Parser(filename=sys.argv[1])\n parsed_data = parser.ParseFile()\n\n # Upon retrieving the Parsed Data, assign the parsed data to the\n # Symbol Table.\n SymbolTable.RULESET = parsed_data['ruleset']\n SymbolTable.INSTRUCTIONS = parsed_data['instructions']\n SymbolTable.CODE = parsed_data['code']\n # SymbolTable.PrintTable()\n\n # Using the Generator backend, we can build the generated script\n generator = Generator()\n generator.GenerateNewScript()\n\n autopep8.fix_file(filename=generator.file_main)\n\n print(\"Generated Script can be found in {}\".format(generator.file_main))",
"def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)",
"def start(self, **kwargs):\n super(Json, self).start(**kwargs)\n flat = self.get_arg_value(\"json_flat\")\n\n self._first_row = True\n self.open_fd()\n begin = \"\" if flat else \"[\"\n self._fd.write(begin)",
"def process_jsonld_file(fname):\n with open(fname, 'r', encoding='utf-8') as fh:\n json_dict = json.load(fh)\n return process_jsonld(json_dict)",
"def main():\n widget = ParseGrypeJSON()\n logging.debug(f'argv {\",\".join(sys.argv)}')\n\n if len(sys.argv) > 1:\n widget.filename(sys.argv[1])\n\n sys.exit(widget.report())",
"def main():\n # if there are no command line args, run as normal with the input from stdin\n if len(sys.argv) == 1:\n json_string = \"\".join(sys.stdin)\n hierarchical_jsons = [load_json(json_string)]\n # if there are command line args, run using the first argument as a file path to a file with\n # correctly formatted test json values\n else:\n with open(sys.argv[1]) as file:\n hierarchical_jsons = json.load(file)\n\n flattened_jsons = []\n\n # for all the jsons given, in the non-test case there will only be one\n for hierarchical_json in hierarchical_jsons:\n # flatten the json object\n flat_json = flatten_json(hierarchical_json)\n # print the prettied json to stdout\n json_string = json.dumps(flat_json, indent=4)\n flattened_jsons.append(json_string)\n print(json_string)\n\n return flattened_jsons",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"datafile\", help=\"Name of JSON file containing WP3-mapped metadata\")\n parser.add_argument(\"csvfile\", help=\"Corrected CSV file\")\n parser.add_argument(\"vcffile\", help=\"VCF file with samples\")\n args = parser.parse_args()\n\n samples = vcf_samples(args.vcffile)\n\n with open(args.datafile, 'r') as infile:\n data = json.load(infile)\n\n if not data:\n print(f\"Error reading data file {args.datafile}.\", file=sys.stderr)\n return\n\n with open(args.csvfile, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for idx, row in enumerate(reader):\n data[idx][\"physiologicalMeasurements\"][\"anthropometry\"][\"weight\"] = [str(float(row['birth_weight_g'])/1000.), row['sbjt_weight_kg']]\n data[idx][\"physiologicalMeasurements\"][\"anthropometry\"][\"height\"] = [row['height_cm'], row['sbjt_length_cm']]\n data[idx][\"physiologicalMeasurements\"][\"circulationAndRespiration\"][\"bloodPressure\"] = [row['sbjt_blood_pressure_systolic']]\n if len(samples) > idx:\n data[idx][\"sample\"] = samples[idx]\n\n print(json.dumps(data))",
"def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())",
"def cli(ctx, path, **kwds):\n if not os.path.exists(path):\n io.error(\"Failed to tool test json file at %s\" % path)\n return 1\n\n test_data = StructuredData(path)\n handle_reports(ctx, test_data.structured_data, kwds)",
"def from_json(self, json_filename, dont_change=False, pure_json=False, binary_format=False):\n gc.disable()\n\n with open(json_filename, 'rb') as f:\n s = f.read()\n\n if not pure_json:\n # Please do not pass unicode to jsmin under py2.\n # Otherwise, it cannot use cStringIO and it bursts performance\n s = jsmin(s)\n elif self.VERBOSE:\n print('Assumed file %s is pure json - skip pre-processing' % json_filename)\n\n types = ujson.loads(s.decode(\"utf-8\"))\n if binary_format:\n dawg_checksum = types[0].get(ProductTypeDict.DAWG_CHECKSUM_ATTR)\n in_memory_dawg_checksum = TypeTerm.term_dict.dawg_checksum()\n if not dawg_checksum or in_memory_dawg_checksum != dawg_checksum:\n raise IOError('DAWG checksum does not correspond in memory version')\n types = types[1]\n\n if self.VERBOSE:\n file_rel_count = sum(map(len, types.values())) - (len(types) if not binary_format else 0)\n print(\"Parsing %d type tuples with %d relations from file %s\" % (len(types), file_rel_count, json_filename))\n\n if not dont_change:\n ProductType.reload()\n if self.VERBOSE:\n print(\"Global product types has been reloaded\")\n\n def parse_terms(terms_str):\n terms = [int(ts) if binary_format else ts.strip() for ts in terms_str.split(u'+')]\n if not binary_format:\n # For text format convert to TypeTerms\n terms = map(TypeTerm.make, terms)\n return terms\n\n type_tuples = defaultdict(list)\n pseudo_sqn = 1\n seen_rel = defaultdict(dict)\n for type_str, rel_str in types.viewitems():\n type_items = parse_terms(type_str)\n # All types are loaded from external sources or knowledge base are considered meaningful regardless of\n # other their characteristics\n if dont_change:\n p_type = ProductType(*type_items, meaningful=True, singleton=False)\n wrapper = EqWrapper(p_type)\n if wrapper in seen_rel:\n # Type has been added already by another relation\n p_type = wrapper.match\n else:\n p_type = ProductType.make_from_terms(type_items, meaningful=True)\n type_tuples[p_type] = []\n\n start_relations_index = 0\n if binary_format:\n type_tuples[p_type] = [to_str(pseudo_sqn)]\n pseudo_sqn += 1\n else:\n if isinstance(rel_str[0], int) or rel_str[0].isdigit():\n # Compatibility with old format: there was one plain integer and json parse it to int() itself\n sqns_in_self = rel_str[0]\n start_relations_index = 1\n else:\n sqns_in_self, _ = re.findall('^(\\d+)(?:/(\\d+))?$', rel_str[0])[0]\n if sqns_in_self and sqns_in_self.isdigit():\n start_relations_index = 1\n\n for i in range(int(sqns_in_self)):\n type_tuples[p_type].append(to_str(pseudo_sqn))\n pseudo_sqn += 1\n\n rel_type, rel_attr, is_soft, type_to_str = [None] * 4\n for r_str in rel_str[start_relations_index:]:\n if binary_format:\n r_match = re.findall('^([0-9+]+)(\\w)(.*)$', r_str)\n if r_match:\n type_to_str, rel_type, rel_attr = r_match[0]\n rel_type = ProductTypeDict.REL_MAPPING_BACKWARD[rel_type]\n is_soft = rel_type in (TYPE_TUPLE_RELATION_ALMOST, TYPE_TUPLE_RELATION_SIMILAR)\n else:\n r_match = re.findall('^(\\w+)(?:\\[([^\\]]+)\\])?(~)?\\s+(.*)$', r_str)\n if r_match:\n rel_type, rel_attr, is_soft, type_to_str = r_match[0]\n is_soft = is_soft == u'~'\n\n if rel_type and type_to_str:\n type_to_items = parse_terms(type_to_str)\n\n if dont_change:\n p_type_related = ProductType(*type_to_items, meaningful=True, singleton=False)\n wrapper = EqWrapper(p_type_related)\n if p_type in seen_rel and wrapper in seen_rel[p_type]:\n # Reverse to this relation has been already created. 
Use type instance from it\n p_type_related = wrapper.match\n else:\n p_type_related = ProductType.make_from_terms(type_to_items, meaningful=True)\n\n if rel_attr:\n try:\n rel_attr = literal_eval(rel_attr)\n except ValueError:\n pass # Just use string as is\n relation = ProductType.Relation(from_type=p_type, to_type=p_type_related, rel_type=rel_type,\n is_soft=is_soft, rel_attr=rel_attr)\n if p_type in seen_rel and p_type_related in seen_rel[p_type]:\n # Already processed back relation\n r_from = seen_rel[p_type][p_type_related]\n assert r_from, \"The same relation is detected more than two times\"\n p_type.make_relation(p_type_related, relation.rel_type, r_from.rel_type,\n relation.is_soft, relation.rel_attr, r_from.rel_attr)\n seen_rel[p_type][p_type_related] = None\n else:\n # Keep back relation until related type is met\n seen_rel[p_type_related][p_type] = relation\n elif self.VERBOSE:\n print(\"WARN: meet unparseable relation: %s\" % r_str)\n\n if self.VERBOSE:\n # Check remaining unresolved relations\n for p_type_related in seen_rel:\n for p_type in seen_rel[p_type_related]:\n if seen_rel[p_type_related][p_type] is not None:\n print(\"Detected hanged unresolved relation from type %s: %s\" % (\n p_type, to_str(seen_rel[p_type_related][p_type])))\n\n self._type_tuples.clear()\n self._type_tuples.update(type_tuples)\n self._type_tuples_on_change()\n\n gc.enable()\n if self.VERBOSE:\n rel_count = sum(map(len, (p_type.relations() for p_type in type_tuples)))\n print(\"Loaded %d type tuples with %d relations from json %s\" % (len(type_tuples), rel_count, json_filename))\n\n return type_tuples",
"def main():\n parser = argparse.ArgumentParser(description=\"Script for generating an index template out of a document\")\n parser.add_argument(\"INDEX_NAME\", help=\"Name of index\")\n parser.add_argument(\"--output_file\", help=\"File to write schema to\")\n args = parser.parse_args()\n\n output = generate_template(args.INDEX_NAME)\n if args.output_file:\n with open(args.output_file, \"w\") as file:\n json.dump(output, file, ensure_ascii=False, indent=4, sort_keys=True)\n else:\n print(json.dumps(output, ensure_ascii=False, indent=4, sort_keys=True))",
"def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)"
] | [
"0.6357114",
"0.634119",
"0.6240263",
"0.6036583",
"0.598429",
"0.59565043",
"0.57677084",
"0.56092983",
"0.5584701",
"0.55134827",
"0.54945713",
"0.5492932",
"0.5417545",
"0.5400936",
"0.5390043",
"0.53854465",
"0.5339671",
"0.53241897",
"0.5318094",
"0.5313744",
"0.5285585",
"0.5254115",
"0.5212975",
"0.5172481",
"0.5163187",
"0.5134912",
"0.51278704",
"0.5100583",
"0.5064265",
"0.50622743"
] | 0.69766474 | 0 |
Run the webp converter on the given png file. | def convert_png_image_to_webp(png, out, quality=80):
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def img2webp(path):\n file, ext = os.path.splitext(path)\n image = Image.open(path).convert(\"RGBA\")\n image = ImageOps.expand(image, 75)\n image.save(file + \".webp\", \"WEBP\")\n os.remove(path)",
"def generate_webp_textures():\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)",
"def run_turtle_program(source):\n ast = parser.parse(source)\n\n t = turtle.Turtle()\n for stmt in ast.statement:\n do_statement(stmt, t)\n canvas = turtle.Screen().getcanvas()\n canvas.postscript(file='image.eps')\n img = Image.open('image.eps')\n img.save('image.png', 'png')\n turtle.Screen().bye()\n return 'image.png'",
"def test_basic_run_png(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200), mime_type='PNG')\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n self.expect_encode_image('SomeImageInPng',\n images_service_pb.OutputSettings.PNG)\n self.run_request('image/png', 'SomeImageInPng')",
"def handle_as_url(view: sublime.View, point: int, string: str, name: str):\n\n # Let's assume this url as input:\n # (https://upload.wikimedia.org/wikipedia/commons/8/84/Example.svg)\n\n # Download the image\n # FIXME: avoid nested try-except clauses\n try:\n try:\n f = urlopen(unquote(string)) # <==\n except Exception:\n try:\n url_path = quote(string).replace(\"%3A\", ':', 1)\n f = urlopen(url_path)\n except Exception:\n f = urlopen(string)\n # don't fill the console with stack-trace when there`s no connection !!\n except Exception as e:\n print(e)\n return\n\n # file needs conversion ?\n need_conversion = name.endswith(formats_to_convert) # => True\n basename, ext = osp.splitext(name) # => (\"Example\", \".svg\")\n # create a temporary file\n temp_img = osp.join(TEMP_DIR, \"tmp_image\" + ext) # => \"TEMP_DIR/tmp_image.svg\"\n\n # Save downloaded data in the temporary file\n content = f.read()\n with open(temp_img, \"wb\") as img:\n img.write(content)\n\n # if the file needs conversion, convert it then read data from the resulting png\n if need_conversion:\n ext = \".png\"\n # keep the image's temporary file and name for later use\n conv_file = temp_img # => \"TEMP_DIR/tmp_image.svg\"\n\n # => \"TEMP_DIR/tmp_image.png\"\n temp_png = osp.splitext(temp_img)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(temp_img, temp_png)\n\n # read data from the resulting png\n with open(temp_png, \"rb\") as png:\n content = png.read()\n\n # set temp_file and name to the png file\n temp_img = temp_png # => \"TEMP_DIR/tmp_image.png\"\n\n width, height, real_width, real_height, size = get_data(view, temp_img)\n encoded = str(base64.b64encode(content), \"utf-8\")\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, name, \"url\")\n else:\n save(temp_img, name, \"url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"url\", name)\n else:\n convert(temp_img, \"url\", name)\n else:\n sublime.active_window().open_file(temp_img)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height,\n str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )",
"def latex2img(expression, filename):\n webp = False\n\n extension = \"png\"\n\n # Preparing text strings\n server = \"http://latex.codecogs.com/\" + extension + \".download?\"\n fullname = filename + \".\" + extension\n size = \"%5Cdpi%7B100%7D%20\"\n\n # Quote expression引用表达式\n expression = quote(expression)\n url = server + size + expression\n\n # Download file from url and save to output_file:\n with urlopen(url) as response, open(fullname, 'wb') as output_file:\n data = response.read() # Un objeto \"bytes\"\n output_file.write(data) # Se escribe en disco\n\n if webp:\n img2webp(fullname)\n extension = \"webp\"\n\n return filename + \".\" + extension",
"def main():\n try:\n pixid = sys.argv[1]\n except IndexError:\n print('Usage: python pixget.py [pixid] (save_path)')\n exit(1)\n\n # get the path\n if len(sys.argv) > 2:\n path = sys.argv[2]\n else:\n path = '.'\n\n imgInfoPool = []\n if get_image_url(pixid, imgInfoPool):\n exit(1)\n download_image(path, imgInfoPool)",
"def image_webp():\n data = resource(\"images/wolf_1.webp\")\n return Response(data, headers={\"Content-Type\": \"image/webp\"})",
"def main():\n argvs = sys.argv\n argc = len(argvs)\n if argc == 1:\n print('usage: convert2png.py <path/to/*.ppm> ...')\n sys.exit(1)\n\n os.makedirs('result/convert2png', exist_ok=True)\n\n for i in range(1, argc):\n img = cv2.imread(argvs[i])\n\n # root, ext = os.path.splitext(argvs[i])\n # cv2.imwrite(root + '.png', img)\n\n root, ext = os.path.splitext(argvs[i])\n strImgName = root.split('/')[-1]\n cv2.imwrite('result/convert2png/' + strImgName + '.png', img)",
"def handle_as_file(view: View, point: int, string: str):\n # \"screenshot.png\"\n\n name = osp.basename(string)\n file, folder = get_file(view, string, name)\n\n # if file doesn't exist, return\n if not osp.isfile(file):\n return\n\n # does the file need conversion ?\n need_conversion = file.endswith(FORMAT_TO_CONVERT)\n\n # if the file needs conversion, convert it and read data from the resulting png\n if need_conversion:\n # keep the image's file and name for later use\n conv_file = file\n conv_name = name\n\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR, \"tmp_png.png\")\n name = osp.splitext(name)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(file, tmp_file)\n\n file = tmp_file\n\n with open(file, \"rb\") as f:\n encoded = str(base64.b64encode(f.read()), \"utf-8\")\n\n real_width, real_height, size = get_image_size(file)\n width, height = get_dimensions(view, file)\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, conv_name, \"file\")\n else:\n save(file, name, \"file\", folder)\n elif href == \"save_as\":\n convert(conv_file if need_conversion else file, \"file\")\n else:\n sublime.active_window().open_file(file)\n\n view.show_popup(\n TEMPLATE % (width, height, \"png\", encoded, real_width,\n real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate)",
"def _webp(self, tile: bytes) -> np.ndarray:\n decoded = np.rollaxis(imagecodecs.webp_decode(tile), 2, 0)\n return decoded",
"def convert_gif(ctx):\n ctx.run(\n 'ffmpeg '\n '-i resources/demo.mkv -filter_complex \"[0:v] palettegen\" '\n 'resources/palette.png',\n pty=True\n )\n ctx.run(\n 'ffmpeg -i resources/demo.mkv '\n '-i resources/palette.png '\n '-filter_complex \"[0:v][1:v] paletteuse\" '\n 'resources/demo.gif',\n pty=True\n )",
"def handle_as_url(view: View, point: int, string: str, name: str):\n\n # Let's assume this url as input:\n # (https://upload.wikimedia.org/wikipedia/commons/8/84/Example.svg)\n\n # Download the image\n # FIXME: avoid nested try-except clauses\n try:\n try:\n f = urlopen(unquote(string)) # <==\n except:\n try:\n url_path = quote(string).replace(\"%3A\", ':', 1)\n f = urlopen(url_path)\n except:\n f = urlopen(string)\n # don't fill the console with stack-trace when there`s no connection !!\n except Exception as e:\n print(e)\n return\n\n # file needs conversion ?\n need_conversion = name.endswith(FORMAT_TO_CONVERT) # => True\n basename, ext = osp.splitext(name) # => (\"Example\", \".svg\")\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR,\n \"tmp_image\" + (ext if need_conversion else \".png\")\n ) # => \"TEMP_DIR/tmp_image.svg\"\n\n # Save downloaded data in the temporary file\n content = f.read()\n with open(tmp_file, \"wb\") as dst:\n dst.write(content)\n\n # if the file needs conversion, convert it then read data from the resulting png\n if need_conversion:\n # keep the image's temporary file and name for later use\n conv_file = tmp_file # => \"TEMP_DIR/tmp_image.svg\"\n conv_name = name # => \"Example.svg\"\n\n # => \"TEMP_DIR/tmp_image.png\"\n png = osp.splitext(tmp_file)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(tmp_file, png)\n\n # set temp_file and name to the png file\n tmp_file = png # => \"TEMP_DIR/tmp_image.png\"\n name = basename + \".png\" # => \"Example.png\"\n\n # read data from the resulting png\n with open(tmp_file, \"rb\") as dst:\n content = dst.read()\n\n real_width, real_height, size = get_image_size(tmp_file)\n width, height = get_dimensions(view, tmp_file)\n encoded = str(base64.b64encode(content), \"utf-8\")\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, conv_name, \"url\")\n else:\n save(tmp_file, name, \"url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"url\", conv_name)\n else:\n convert(tmp_file, \"url\", name)\n else:\n sublime.active_window().open_file(tmp_file)\n\n view.show_popup(\n TEMPLATE % (width, height, \"png\", encoded, real_width, real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )",
"def main():\n p = argparse.ArgumentParser(description='Convert images into unicode')\n p.add_argument('image', metavar='<path>', type=str,\n help='path to the file, use - for stdin')\n p.add_argument('--no-x256', action='store_false', dest='x256', default=True,\n help='prints with x256 unicode coloring')\n p.add_argument('--char-set', metavar='<name>', default='default',\n help='prints with character set (e.g. windows)')\n args = p.parse_args()\n print_image_as_unicode(args.image, char_set=CHAR_SETS[args.char_set],\n x256=args.x256)",
"def handle_as_file(view: sublime.View, point: int, string: str):\n\n name = osp.basename(string)\n file, folder = get_file(view, string, name)\n\n # if file doesn't exist, return\n if not osp.isfile(file):\n return\n\n # does the file need conversion ?\n need_conversion = file.endswith(formats_to_convert)\n ext = name.rsplit('.', 1)[1]\n\n # if the file needs conversion, convert it and read data from the resulting png\n if need_conversion:\n ext = \".png\"\n # keep the image's file and name for later use\n conv_file = file\n\n # create a temporary file\n temp_png = osp.join(TEMP_DIR, \"temp_png.png\")\n\n # use the magick command of Imagemagick to convert the image to png\n magick(file, temp_png)\n\n file = temp_png\n\n with open(file, \"rb\") as img:\n encoded = str(base64.b64encode(img.read()), \"utf-8\")\n\n width, height, real_width, real_height, size = get_data(view, file)\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, name, \"file\")\n else:\n save(file, name, \"file\", folder)\n elif href == \"save_as\":\n convert(conv_file if need_conversion else file, \"file\")\n else:\n sublime.active_window().open_file(file)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height,\n str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate)",
"def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)",
"def testConvertHtmlWithPngDataUrlToPdf(self):\n self._testBase(\"data/test_with_png_dataurl.html\")",
"def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)",
"def start_pupil():\n # runs Pupil Capture from source\n path = os.path.abspath(\"../../pupil/pupil_src/capture/main.py\")\n return subprocess.call('python ' + shellformat(path), shell=True)\n \n # if running Pupil Capture using the app, comment the above code and uncomment below:\n '''\n path = os.path.abspath(\"../pupil_capture_0.4.1_mac.app\")\n return subprocess.call('open ' + shellformat(path), shell=True)\n '''",
"def main():\n folder = \"D:\\\\Noam10\\\\Documents\\\\Documents\\\\dither 2\"\n filename = \"kirigiri\"\n filetype = \".jpg\"\n input_file = folder + \"\\\\\" + filename + filetype\n for palette in paletteDict.keys():\n output_file = folder + \"\\\\\" + filename + \"(\" + palette + \").bmp\"\n Dither(input_file, output=output_file, palette=paletteDict[palette])\n print(output_file)",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def main(url):\n print(f\"Running main with URL = {url}...\")\n imagehits(downloaddata(url))",
"def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")",
"def create_png(dotfile=\"tree.dot\", pngfile=\"tree.png\"):\n dotfile = utils.get_abspath(dotfile)\n pngfile = utils.get_abspath(pngfile)\n cmd = create_cmd(dotfile, pngfile)\n subprocess.run(cmd, check=True)",
"def test_encode_webp():\n width = 51\n height = 26\n channels = 3\n bmp_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_image\", \"lena.bmp\")\n with open(bmp_file, 'rb') as f:\n bmp_contents = f.read()\n image_v = tf.image.decode_bmp(bmp_contents)\n assert image_v.shape == [height, width, channels]\n bmp_encoded = image_io.encode_bmp(image_v)\n image_e = tf.image.decode_bmp(bmp_encoded)\n assert np.all(image_v.numpy() == image_e.numpy())",
"def new_func():\n dirname, _ = os.path.split(os.path.abspath(__file__))\n pngfile = os.path.sep.join([dirname, \"out.png\"])\n img = [\"110010010011\", \"101011010100\", \"110010110101\", \"100010010011\"]\n img = [[int(val) for val in value] for value in img]\n writer = png.Writer(len(img[0]), len(img), greyscale=True, bitdepth=16)\n with open(pngfile, \"wb\") as file:\n writer.write(file, img)\n try:\n func(pngfile)\n finally:\n os.remove(pngfile)",
"def process(image):\n pass",
"def on_pushButton_2_clicked(self):\n # TODO: not implemented yet\n try:\n str='str.png'\n process_pic.graphics ().process (str)\n self.click=\"process\"\n pixMap = QPixmap(\"temp.png\").scaled(self.label.width(),self.label.height())\n self.label.setPixmap(pixMap)\n except:\n button=QMessageBox.about(self, '注意', '应先向空白处导入图片后再进行处理')\n else:\n pass\n\n\n\n #os.popen('python process_pic.py')",
"def save_png(self, filename):\n post_script = self.canvas.postscript().encode()\n img = Image.open(io.BytesIO(post_script))\n img.save(filename, format=\"PNG\")",
"def convert_pdf_to_web(input_file, quality=90, resolution=150):\n input_file = Path(input_file)\n input_file.resolve(strict=True) # Raises FileNotFound\n output_file, no_change = get_output_file(input_file, 'WEB')\n if no_change:\n return output_file\n\n rgb_profile = Path(__file__).parent / 'sRGB.icc'\n if not rgb_profile.exists():\n msg = f'Color profile \"{rgb_profile.name}\" is missing'\n raise RuntimeError(msg)\n args = [\n GHOSTSCRIPT,\n '-q',\n '-dColorConversionStrategy=/DeviceRGB',\n '-dColorConversionStrategyForImages=/DeviceRGB',\n '-dBATCH',\n '-dNOPAUSE',\n '-sDEVICE=pdfwrite',\n '-dConvertCMYKImagesToRGB=true',\n '-dDownsampleColorImages=true',\n '-dDownsampleGrayImages=true',\n '-dDownsampleMonoImages=true',\n f'-sDefaultRGBProfile={rgb_profile}',\n f'-dJPEGQ={quality}',\n f'-dColorImageResolution={resolution}',\n f'-dGrayImageResolution={resolution}',\n f'-dMonoImageResolution={resolution}',\n '-o',\n output_file,\n input_file,\n ]\n subprocess.run(map(str, args))\n logger.debug(\n f'{input_file} ({input_file.stat().st_size}) -> '\n f'{output_file} ({output_file.stat().st_size})'\n )\n return output_file"
] | [
"0.59895134",
"0.5819123",
"0.5774753",
"0.5767522",
"0.57351494",
"0.57216007",
"0.56728303",
"0.5661198",
"0.5626706",
"0.55680203",
"0.5460881",
"0.5396583",
"0.53937674",
"0.5302584",
"0.5294534",
"0.5288155",
"0.52613574",
"0.523901",
"0.51677066",
"0.5115962",
"0.51095086",
"0.5104897",
"0.50971484",
"0.50862503",
"0.501369",
"0.50127435",
"0.50103486",
"0.49888846",
"0.49851465",
"0.4984686"
] | 0.67798376 | 0 |
Checks if the source file needs to be rebuilt. | def needs_rebuild(source, target):
return not os.path.isfile(target) or (
os.path.getmtime(source) > os.path.getmtime(target)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_source_file( self ):\n return self._source_file is not None",
"def source_changed(source, cache):\n return os.path.getmtime(source)>os.path.getmtime(cache)",
"def should_run(self):\n # from IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for dirpath, dirnames, filenames in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False",
"def _check_compiled(self):\n\n if self._compiled is None:\n self.compile()",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def _need_generate(paths):\r\n if not os.path.exists(paths.generated_dir):\r\n return True\r\n\r\n if not os.path.exists(paths.index_file):\r\n return True\r\n\r\n # Use the index file to determine if regeneration is necessary\r\n with open(paths.index_file, 'r',newline='\\n') as index_file:\r\n indexed = [item for item in\r\n index_file.read().split('\\n') if len(item) != 0 and\r\n not item.startswith(\"#\")]\r\n return indexed != paths.resource_files",
"def _check_if_cff_file_needs_rewriting(self, content):\n logger.info(\"Checking if we can re-use injection config file...\")\n if os.path.isfile(self.config_file_name) is False:\n logger.info(\"...no config file {} found.\".format(self.config_file_name))\n return True\n else:\n logger.info(\n \"...OK: config file {} already exists.\".format(self.config_file_name)\n )\n\n with open(self.config_file_name, \"r\") as f:\n file_content = f.read()\n if file_content == content:\n logger.info(\n \"...OK: file contents match, no update of {} required.\".format(\n self.config_file_name\n )\n )\n return False\n else:\n logger.info(\n \"...file contents unmatched, updating {}.\".format(\n self.config_file_name\n )\n )\n return True",
"def has_source(self):\n return any(map(utils.assert_package_is_source, self.pkg_arguments))",
"def test_verify_changed_source_file(self):\n # This test was made to pass in fixing Bug #1354880\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])",
"def checkfile(filename, source=None):\n if source:\n # Let's check some sums\n if os.path.exists(filename) and os.path.exists(source):\n src_sha = calchash(source)\n dest_sha = calchash(filename)\n if DRYRUN:\n print(\"{src} hash {src_sha}. {dest} hash {dest_sha}\".format(src=source, dest=filename, src_sha=src_sha.hexdigest(), dest_sha=dest_sha.hexdigest()))\n return src_sha.digest() == dest_sha.digest()\n else:\n return os.path.exists(filename)",
"def needs_rebuild(self) -> bool:\n old_hash = self._cache.get(\"config\", None)\n new_hash = utilities.hash_object_sha256(self._get_config_raw())\n self._cache[\"config\"] = new_hash\n\n if not old_hash:\n return False\n return old_hash != new_hash",
"def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False",
"def checkBuildStatus(self):\n pass",
"def process(self, source_path: pathlib.Path) -> bool:",
"def _is_prebuilt(self, cfg, patch_idx, prefix=\"PREFIX\"):\n ext = None\n dir = None\n\n if (cfg.load_models_dir is None):\n return False\n\n # Get the unique lookup file path\n fpath = self._get_unique_lookup_filepath(patch_idx, cfg.load_models_dir, prefix, NNModel._M_FILE_EXT)\n\n # Check the existence of the file\n if not os.path.isfile(fpath):\n raise Exception('Model file does not exist: {0}'.format(fpath))\n\n return True",
"def check_one(filename):\n\n # The file may have been removed from the filesystem.\n # ===================================================\n\n if not isfile(filename):\n if filename in mtimes:\n sys.exit(1) # trigger restart\n else:\n # We haven't seen the file before. It has probably been loaded \n # from a zip (egg) archive.\n return\n\n\n # Or not, in which case, check the mod time.\n # ==========================================\n\n mtime = os.stat(filename).st_mtime\n if filename not in mtimes: # first time we've seen it\n mtimes[filename] = mtime\n if mtime > mtimes[filename]:\n sys.exit(1) # trigger restart",
"def check_cached_data_okay_to_use(self, cl_mfd):\n\n need_new = \"Will create new SFT file(s).\"\n\n logger.info(\"Checking if we can re-use existing SFT data file(s)...\")\n for sftfile in self.sftfilenames:\n if os.path.isfile(sftfile) is False:\n logger.info(\n \"...no SFT file matching '{}' found. {}\".format(sftfile, need_new)\n )\n return False\n logger.info(\"...OK: file(s) found matching '{}'.\".format(sftfile))\n\n if os.path.isfile(self.config_file_name):\n if np.any(\n [\n os.path.getmtime(sftfile) < os.path.getmtime(self.config_file_name)\n for sftfile in self.sftfilenames\n ]\n ):\n logger.info(\n (\n \"...the config file '{}' has been modified since\"\n \" creation of the SFT file(s) '{}'. {}\"\n ).format(self.config_file_name, self.sftfilepath, need_new)\n )\n return False\n else:\n logger.info(\n \"...OK: The config file '{}' is older than the SFT file(s)\"\n \" '{}'.\".format(self.config_file_name, self.sftfilepath)\n )\n # NOTE: at this point we assume it's safe to re-use, since\n # _check_if_cff_file_needs_rewriting()\n # should have already been called before\n elif \"injectionSources\" in cl_mfd:\n raise RuntimeError(\n \"Commandline requires file '{}' but it is missing.\".format(\n self.config_file_name\n )\n )\n\n logger.info(\"...checking new commandline against existing SFT header(s)...\")\n # here we check one SFT header from each SFT file,\n # assuming that any concatenated file has been sanely constructed with\n # matching CLs\n for sftfile in self.sftfilenames:\n catalog = lalpulsar.SFTdataFind(sftfile, None)\n cl_old = utils.get_commandline_from_SFTDescriptor(catalog.data[0])\n if len(cl_old) == 0:\n logger.info(\n \"......could not obtain comparison commandline from first SFT\"\n \" header in old file '{}'. {}\".format(sftfile, need_new)\n )\n return False\n if not utils.match_commandlines(cl_old, cl_mfd):\n logger.info(\n \"......commandlines unmatched for first SFT in old\"\n \" file '{}':\".format(sftfile)\n )\n logger.info(cl_old)\n logger.info(cl_mfd)\n logger.info(need_new)\n return False\n logger.info(\"......OK: Commandline matched with old SFT header(s).\")\n logger.info(\n \"...all data consistency checks passed: Looks like existing\"\n \" SFT data matches current options, will re-use it!\"\n )\n return True",
"def needs_update(self):\n return not self.config.same_as(self._stored_config,\n excluded=[\"cmake_generator\"])",
"def md5sum_check_if_build_is_needed(md5sum_cache_dir: Path, skip_provider_dependencies_check: bool) -> bool:\n build_needed = False\n modified_files, not_modified_files = calculate_md5_checksum_for_files(\n md5sum_cache_dir, update=False, skip_provider_dependencies_check=skip_provider_dependencies_check\n )\n if modified_files:\n get_console().print(\n f\"[warning]The following important files are modified in {AIRFLOW_SOURCES_ROOT} \"\n f\"since last time image was built: [/]\\n\\n\"\n )\n for file in modified_files:\n get_console().print(f\" * [info]{file}[/]\")\n get_console().print(\"\\n[warning]Likely CI image needs rebuild[/]\\n\")\n build_needed = True\n else:\n get_console().print(\n \"[info]Docker image build is not needed for CI build as no important files are changed! \"\n \"You can add --force-build to force it[/]\"\n )\n return build_needed",
"def already_processed(self):\n # If the flag file has been created by a previous run\n # or if any of the rules have already been re-ordered\n # then we shouldn't make any more changes and instead\n # the system needs to be rebooted.\n return self.syspaths.flag_exists",
"def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)",
"def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))",
"def test_check_source_3(self):\n self.eval_flags[\"check_host_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 1)",
"def needs_reinit(self):\n current_cmake_generator = self.config.get(\"cmake_generator\")\n stored_cmake_generator = self._stored_config.get(\"cmake_generator\")\n return ((current_cmake_generator != stored_cmake_generator) or\n not self.has_stored_config_file())",
"def test_check_source_1(self):\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 4)",
"def _source_filename_field_was_properly_initialized(self):\n if not Rule.sources_list_is_initialized:\n Rule.sources_list.append(self.source)\n Rule.sources_list_is_initialized = True\n # print(f\"if {self.source} not in {Rule.sources_list}\")\n if self.source not in Rule.sources_list:\n # print(f\"In rule: {self}\")\n # print(f\"Rule.sources_list = {Rule.sources_list}\")\n raise UninitializedSourceError(f\"{repr(self.source)} not initialized.\")\n if self.target not in Rule.sources_list:\n Rule.sources_list.append(self.target)\n return True",
"def test_check_source_2(self):\n self.eval_flags[\"check_id_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def _validate_sources(generated_sources, original_sources):\n\n generated_sources = list(set(generated_sources))\n original_sources = list(set(original_sources))\n not_existent_source = []\n for source in original_sources:\n if source not in generated_sources:\n not_existent_source.append(source)\n\n if not_existent_source:\n print('WARN: Some sources did exist in generated file')\n print(not_existent_source)\n return False\n\n return True",
"def check_source(source_name):\n source_ext = \".pyx\"\n if not HAS_CYTHON:\n source_name = source_name.replace(\".pyx.in\", \".c\")\n source_name = source_name.replace(\".pyx\", \".c\")\n source_ext = \".c\"\n if not os.path.exists(source_name):\n msg = (\n \"C source not found. You must have Cython installed to \"\n \"build if the C source files have not been generated.\"\n )\n raise IOError(msg)\n return source_name, source_ext",
"def test_check_source_9(self):\n self.src1._host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)"
] | [
"0.6899336",
"0.66479534",
"0.6410455",
"0.63760024",
"0.63628346",
"0.63059103",
"0.62350976",
"0.619009",
"0.61705863",
"0.615026",
"0.6144818",
"0.6059382",
"0.6036777",
"0.5976113",
"0.5958864",
"0.5946087",
"0.5920723",
"0.591227",
"0.5905643",
"0.58806807",
"0.58798784",
"0.58223265",
"0.58142906",
"0.58027095",
"0.5776974",
"0.5767098",
"0.57249373",
"0.5715421",
"0.5711625",
"0.5702834"
] | 0.78058094 | 0 |
Take the path to a raw json asset and convert it to the target bin path. | def processed_json_path(path):
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def processed_json_dir(path):\n return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))",
"def processed_texture_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')",
"def copy_json():\n sourcePath = 'contents/external/'\n targetPath = 'build/external/'\n for base,subdirs,files in os.walk(sourcePath):\n for file in files:\n orig = os.path.join(base, file)\n if os.path.isfile(orig) and file[-5:] == '.json':\n targetBase = os.path.join(targetPath, base[len(sourcePath):])\n dest = os.path.join(targetBase, file)\n puts(\"Checking diretory %s\" % targetBase)\n if not os.path.exists(targetBase):\n puts(yellow(\"Not found! Creating...\"))\n os.makedirs(targetBase)\n puts(\"Copying from %s to %s\" % (orig, dest))\n copyfile(orig, dest)",
"def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"",
"def processed_to_raw_path(self, processed_path):\n # Extract useful information from <path>\n stage, hash_dir, cloud_id = \\\n osp.splitext(processed_path)[0].split('/')[-3:]\n\n # Remove the tiling in the cloud_id, if any\n base_cloud_id = self.id_to_base_id(cloud_id)\n\n # Read the raw cloud data\n raw_ext = osp.splitext(self.raw_file_names_3d[0])[1]\n raw_path = osp.join(self.raw_dir, base_cloud_id + raw_ext)\n\n return raw_path",
"def Sourceify(path):\n return path",
"def get_source_file_name(json_name):\n assert json_name.endswith(JSON_EXT)\n (directory, base_name) = os.path.split(json_name)\n new_directory = os.path.relpath(directory, start=CACHE)\n new_directory = os.path.join(\"/\", new_directory)\n new_base_name = base_name[:-len(JSON_EXT)]\n i = new_base_name.rfind(\"-\")\n if i != -1:\n new_base_name = new_base_name[:i] + \".\" + new_base_name[i + 1:]\n result = os.path.join(new_directory, new_base_name)\n return result",
"def resourcePath(relative):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'assets'))\r\n\r\n return os.path.join(base_path, relative)",
"def Mapping2Bin(decode_cfg, jsonconfig, filename=\"\"):\n if isinstance(decode_cfg, str):\n decode_cfg = bytearray(decode_cfg)\n\n\n # get binary header data to use the correct version template from device\n _, version, _, setting = GetTemplateSetting(decode_cfg)\n\n # make empty binarray array\n _buffer = bytearray()\n # add data\n _buffer.extend(decode_cfg)\n\n if setting is not None:\n # iterate through restore data mapping\n for name in jsonconfig:\n # key must exist in both dict\n if name in setting:\n SetField(_buffer, name, setting[name], jsonconfig[name], addroffset=0, filename=filename)\n else:\n if name != 'header':\n exit(ExitCode.RESTORE_DATA_ERROR, \"Restore file '{}' contains obsolete name '{}', skipped\".format(filename, name), type_=LogType.WARNING, doexit=not args.ignorewarning)\n\n if 'cfg_crc' in setting:\n crc = GetSettingsCrc(_buffer)\n struct.pack_into(setting['cfg_crc'][0], _buffer, setting['cfg_crc'][1], crc)\n if 'cfg_crc32' in setting:\n crc32 = GetSettingsCrc32(_buffer)\n struct.pack_into(setting['cfg_crc32'][0], _buffer, setting['cfg_crc32'][1], crc32)\n return _buffer\n\n else:\n exit(ExitCode.UNSUPPORTED_VERSION,\"File '{}', Tasmota configuration version 0x{:x} not supported\".format(filename, version), type_=LogType.WARNING, doexit=not args.ignorewarning)\n\n return None",
"def get_target_binary():\n file_location = prompt_base(\"where is the file located?\")\n file_location = os.path.abspath(file_location)\n return file_location",
"def convert_json_to_flatbuffer_binary(json, schema, out_dir):\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)",
"def _path(name: str):\n return os.path.join(ASSET_PATH, name)",
"def _json_probe(srcfile):\n return json.loads(__run(srcfile))",
"def loadAsset(self, *args):\n\n asset = OL.loadAssemblyReference(self.name)\n return asset",
"def load_json(path):\n with open(normpath(path), 'r', encoding='utf-8') as file:\n return json.load(file)",
"def _get_json(self, path):\n cur_dir = path_stroke_fix(path)\n path = f\"{cur_dir}config/config.json\"\n return json.load(open(path, 'r'))",
"def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)",
"def get_asset_path(test):\n return DEVICE_ASSETS_PATH + os.path.basename(test)",
"def normalizeNativePath(path: unicode) -> unicode:\n ...",
"def path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\"./\")\n\n print(\"[RESOURCE]\", relative_path)\n rPath = os.path.join(base_path, relative_path)\n return rPath",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def test_get_pathless_raw_file_name_json(self):\n\n this_pathless_file_name = probsevere_io._get_pathless_raw_file_name(\n unix_time_sec=VALID_TIME_UNIX_SEC,\n file_extension=probsevere_io.JSON_FILE_EXTENSION)\n\n self.assertTrue(this_pathless_file_name == PATHLESS_JSON_FILE_NAME)",
"def parse_bundle_for_file(fhir_bundle_path):\n\n with open(fhir_bundle_path, 'r', encoding='UTF-8') as f:\n bundle = bu.Bundle(json.load(f))\n return bundle",
"def get_asset_path(name):\n return os.path.join(constants.ROOT_DIR, 'assets', name)",
"def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)",
"def bin(self, resource, id):\n self.cli.follow_redirects = False\n response, content = self.cli.request(self.url + '/' + resource + '/' + str(id) + '.bin', 'GET')\n self.cli.follow_redirects = True\n return self.cli.request(response['location'], 'GET')[1]",
"def setup_rawpath(job, raw_path):\n\n logging.info(f\"Destination is {raw_path}\")\n if not os.path.exists(raw_path):\n try:\n os.makedirs(raw_path)\n except OSError:\n err = f\"Couldn't create the base file path: {raw_path}. Probably a permissions error\"\n logging.error(err)\n else:\n logging.info(f\"{raw_path} exists. Adding timestamp.\")\n raw_path = os.path.join(str(job.config.RAW_PATH), f\"{job.title}_{job.stage}\")\n logging.info(f\"raw_path is {raw_path}\")\n try:\n os.makedirs(raw_path)\n except OSError:\n err = f\"Couldn't create the base file path: {raw_path}. Probably a permissions error\"\n raise OSError(err) from OSError\n return raw_path",
"def fixup_bin(url):\n f = open('build\\\\pop-nedry.bin', 'r+b')\n f.seek(0x1dd)\n f.write(url)\n f.close()",
"def dir_bin():\n return abspath('bin')"
] | [
"0.61563104",
"0.57579714",
"0.54303104",
"0.5163897",
"0.5136838",
"0.5134525",
"0.50500184",
"0.50419426",
"0.50137776",
"0.50136286",
"0.50113374",
"0.49979314",
"0.49422348",
"0.49397087",
"0.493285",
"0.49240142",
"0.49194804",
"0.48922402",
"0.48879048",
"0.487408",
"0.48632804",
"0.48632804",
"0.48344612",
"0.48313734",
"0.4803892",
"0.47913563",
"0.47894514",
"0.47740072",
"0.4772803",
"0.47707918"
] | 0.74835473 | 0 |
Run the flatbuffer compiler on all of the flatbuffer json files. | def generate_flatbuffer_binaries():
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(
json, schema, output_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')",
"def main(gtfs_file, input_json_file):\n\n with open(input_json_file) as jsonfile:\n input_json = json.load(jsonfile)\n\n gtfs_feed = mzgtfs.feed.Feed(filename=gtfs_file)\n\n for fare_id, rules_attributes in input_json.iteritems():\n add_fare_id(gtfs_feed, fare_id, rules_attributes)\n\n files = ['fare_attributes.txt', 'fare_rules.txt']\n gtfs_feed.write('fare_attributes.txt', gtfs_feed.fares())\n gtfs_feed.write('fare_rules.txt', gtfs_feed.fare_rules())\n\n gtfs_feed.make_zip('output.zip', files=files, clone=gtfs_file)\n shutil.move('output.zip', gtfs_file)\n\n util.delete_temp_files(files)",
"def clean_flatbuffer_binaries():\n for element in FLATBUFFERS_CONVERSION_DATA:\n for json in element.input_files:\n path = processed_json_path(json)\n if os.path.isfile(path):\n os.remove(path)",
"def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)",
"def main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n usage_ = \"\"\"%prog [OPTIONS] JsonFile [MoreJsonFiles...]\nFormat/Beautify one or more JSON file(s).\"\"\"\n parser = OptionParser(usage=usage_, version=VERSION)\n parser.add_option(\"-i\", \"--indent\", dest=\"indent_size\",\n default=DEFAULT_INDENT_SIZE, type=\"int\",\n help=\"Indent size to use (default: %default).\")\n parser.add_option(\"-c\", \"--compact\", dest=\"compact\",\n action=\"store_true\", default=False,\n help=\"Use compact format (default: %default).\")\n parser.add_option(\"-n\", \"--dry-run\", dest=\"dry_run\",\n action=\"store_true\", default=False,\n help=\"Check only if JSON is well-formed (default: %default).\")\n options, filenames = parser.parse_args(args) #< pylint: disable=W0612\n if not filenames:\n parser.error(\"OOPS, no filenames provided.\")\n if options.compact:\n options.indent_size = None\n\n # -- STEP: Init logging subsystem.\n format_ = \"json.format: %(message)s\"\n logging.basicConfig(level=logging.WARN, format=format_)\n console = logging.getLogger(\"console\")\n\n # -- DOS-SHELL SUPPORT: Perform filename globbing w/ wildcards.\n skipped = 0\n filenames2 = []\n for filename in filenames:\n if \"*\" in filenames:\n files = glob.glob(filename)\n filenames2.extend(files)\n elif os.path.isdir(filename):\n # -- CONVENIENCE-SHORTCUT: Use DIR as shortcut for JSON files.\n files = glob.glob(os.path.join(filename, \"*.json\"))\n filenames2.extend(files)\n if not files:\n console.info(\"SKIP %s, no JSON files found in dir.\", filename)\n skipped += 1\n elif not os.path.exists(filename):\n console.warning(\"SKIP %s, file not found.\", filename)\n skipped += 1\n continue\n else:\n assert os.path.exists(filename)\n filenames2.append(filename)\n filenames = filenames2\n\n # -- NORMAL PROCESSING:\n errors = json_formatall(filenames, options.indent_size,\n dry_run=options.dry_run)\n console.error(\"Processed %d files (%d with errors, skipped=%d).\",\n len(filenames), errors, skipped)\n if not filenames:\n errors += 1\n return errors",
"def convert_json_to_flatbuffer_binary(json, schema, out_dir):\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)",
"def run(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n for entry in glob.glob(os.path.join(self.data_folder, self.data_expression)):\n f = open(entry)\n text = json.loads(f.read())\n f.close()\n self.create_page_objects(text)",
"def parse(root_path, output_path, batch_file_paths):\n for file_path in batch_file_paths:\n d, f = split_path(file_path, root_path)\n print(time.ctime(), \"d =\", d, \"; f =\", f)\n if not os.path.exists(os.path.join(OUTPUT_PATH, d)):\n os.makedirs(os.path.join(OUTPUT_PATH, d))\n parsed_files = get_parsed_files(output_path, d)\n if f not in parsed_files:\n with open(file_path) as json_file:\n OUTPUT_FILE_PATH = os.path.join(OUTPUT_PATH, d, f)\n with open(OUTPUT_FILE_PATH, 'w') as writer:\n for num, line in enumerate(json_file):\n json_data = json.loads(line)\n title = json_data['title']\n if json_data['text']:\n definition = json_data['text'][0]['line']\n writer.write(f'{title}\\t{definition}\\n')\n else:\n writer.write(f'{title}\\tNone.\\n')",
"def main():\n\n obj_lookup = interfaces_dir / \"FrameLib-obj-jlookup.json\"\n\n worker = jParseAndBuild()\n\n refpages = [x for x in refpages_dir.rglob(\"fl.*.xml\")]\n\n for ref in refpages:\n worker.extract_from_refpage(ref)\n\n write_json(obj_lookup, worker.j_master_dict)",
"def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()",
"def main():\n onlyfiles = [f for f in listdir(RAWDATA_PATH) if isfile(join(RAWDATA_PATH, f))]\n for file in onlyfiles:\n create_RCSB_fastas(file)",
"def main():\n\n # Command Line Interface\n parse = command_line()\n args = parse.parse_args()\n if not os.path.isdir(args.directory):\n raise IOError\n\n # Abstract File Tree\n filetree = tree_walk(args.directory, args.replace, args.depth)\n jsontree = json.dumps(\n filetree,\n indent=4,\n sort_keys=True,\n separators=(', ', ': '),\n )\n\n # Pipe vs Redirection\n if sys.stdout.isatty():\n try: jsontree = highlight(\n jsontree,\n JsonLexer(),\n Terminal256Formatter(style='autumn'))\n except:\n pass\n\n print(jsontree)",
"def fix_jsons_in(bids_dir: Path):\n\n print(\"Finalizing task json files.\")\n\n for path in bids_dir.rglob(\"func/*_task-*.json\"):\n append_to_json_file(key=\"TaskName\", value=task_name_of(path), path_to_json=path)\n\n print(\"Appending echo times to phase difference json files.\")\n\n for path in bids_dir.rglob(\"fmap/*_phasediff.json\"):\n magnitude1_path = the_path_that_matches(pattern=\"sub-*_magnitude1.json\", in_directory=path.parent)\n magnitude2_path = the_path_that_matches(pattern=\"sub-*_magnitude2.json\", in_directory=path.parent)\n echo_time1 = value_of_key_in_json_file(\"EchoTime\", magnitude1_path)\n echo_time2 = value_of_key_in_json_file(\"EchoTime\", magnitude2_path)\n append_to_json_file(key=\"EchoTime1\", value=echo_time1, path_to_json=path)\n append_to_json_file(key=\"EchoTime2\", value=echo_time2, path_to_json=path)\n\n print(\"Setting targets of fieldmap json files.\")\n\n for path in bids_dir.rglob(\"fmap/*.json\"):\n func_dir = path.parent.parent / \"func\"\n trimmed_func_paths = [\"func/\" + func_path.name for func_path in func_dir.glob(\"*.nii\")]\n append_to_json_file(key=\"IntendedFor\", value=trimmed_func_paths, path_to_json=path)",
"def analyze(self):\n for f in self.files:\n tokenizer = Tokenizer(f)\n self.write_tokens(tokenizer)\n compilation_engine = CompilationEngine(tokenizer, f)\n compilation_engine.compile()\n self.write_syntax_tree(compilation_engine)\n compilation_engine.VMwriter.create_file(f[:-5])",
"def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))",
"def main(arguments):\n\n # The input file can be optionally encoded with gzip format:\n input_file = arguments.input_file[0]\n assert isinstance(input_file, str)\n if input_file.endswith(\".gz\"):\n _open = gzip.open\n else:\n _open = open\n with _open(input_file, \"rt\",\n encoding='utf-8') as fd:\n print(\"Loading JSON content into memory....\")\n raw = json.load(fd) # Parses all the input file.\n\n # Also the output file can be optionally encoded with gzip format:\n output_file = arguments.output_file[0]\n assert isinstance(output_file, str)\n uuid = 0\n if output_file.endswith(\".gz\"):\n _open = gzip.open\n else:\n _open = open\n with _open(output_file, \"wt\",\n encoding='utf-8') as fd:\n # for each element extracted from the input\n print(\"Generating distilled file\")\n for item in load_input(raw):\n uuid += 1 # generates incremental uuid from 1\n item['uuid'] = uuid\n fd.write(json.dumps(item,\n sort_keys=True))\n fd.write(\"\\n\") # one encoded document per line\n\n print(\"{} documents imported\".format(uuid))",
"def main():\n\n # Set up argument parser.\n parser = argparse.ArgumentParser(\n description='Removes duplicate key-value pairs from JSON files.')\n parser.add_argument('--suffix', default='',\n help='optional suffix for output files; '\n 'if empty, files will be changed in place')\n parser.add_argument('files', nargs='+', help='input files')\n args = parser.parse_args()\n\n # Iterate over files.\n for filename in args.files:\n # Read in json using Python libraries. This eliminates duplicates.\n print('Processing ' + filename + '...')\n try:\n with codecs.open(filename, 'r', 'utf-8') as infile:\n j = json.load(infile)\n except ValueError as e:\n print('Error reading ' + filename)\n raise InputError(filename, str(e))\n\n # Built up output strings as an array to make output of delimiters easier.\n output = []\n for key in j:\n if key != '@metadata':\n output.append('\\t\"' + key + '\": \"' +\n j[key].replace('\\n', '\\\\n') + '\"')\n\n # Output results.\n with codecs.open(filename + args.suffix, 'w', 'utf-8') as outfile:\n outfile.write('{\\n')\n outfile.write(',\\n'.join(output))\n outfile.write('\\n}\\n')",
"def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)",
"def process_data(*args, **kwargs):\n\n filepath = kwargs[\"filepath\"]\n func = kwargs[\"func\"]\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(datafile)\n print('{}/{} files processed.'.format(i, num_files))",
"def process_dart(self):\n self.dartfiles = set()\n self.jsfiles = set()\n self.htmlfiles = set()\n self.cssfiles = set()\n self.otherfiles = set()\n for src in self.source:\n if isinstance(src,str):\n node = self.path.find_node(src)\n else:\n node = src\n if node.suffix() == '.dart':\n self.dartfiles.add(node)\n elif node.suffix() == '.js':\n self.jsfiles.add(node)\n elif node.suffix() == '.html':\n self.htmlfiles.add(node)\n elif node.suffix() == '.css':\n self.cssfiles.add(node)\n else:\n self.otherfiles.add(node)\n self.source = []\n self.outdir = self.path.make_node(self.target + '/').get_bld()\n self.outdir.mkdir()",
"def main():\n processSetOfCerFiles(sys.argv[1:])",
"def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0",
"def runall(sources='sources'):\n init()\n upload_sources(sources)\n compile()\n run()",
"def main():\n print(\n \"\"\"\n\n ##########################################################\n # #\n # #\n # Compiling Colocalized Cyano Datasets #\n # #\n # #\n ##########################################################\n\n \n \"\"\"\n )\n cyanoFiles = glob.glob(f\"{COLOCALIZED_DIR}*.csv\")\n makedir(COMPILED_DIR)\n dfCompiled = pd.DataFrame({})\n for cyanoFile in cyanoFiles:\n print(f\"Compiling {cyanoFile}\")\n data = unify(cyanoFile)\n if len(dfCompiled ) < 1:\n dfCompiled = data\n else:\n dfCompiled = pd.concat([dfCompiled, data], ignore_index=True) \n dfCompiled.to_csv(f\"{COMPILED_DIR}compiled.csv\", index=False)",
"def main():\n # There are no args, but parse them just so help works\n args = docopt(__doc__)\n print(process_files_json(), end=\"\")\n return None",
"def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))",
"def test_json():\n schemas = {\n 'schema-languages': 'bible/languages.json',\n 'schema-book-metadata': 'bible/book-metadata.json',\n 'schema-bible': 'bible/bible-*.json'\n }\n for schema_name, data_path_glob in schemas.items():\n schema_path = 'schemas/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_glob)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n yield jsonschema.validate, data, schema",
"def main(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n\n course_dict = {}\n course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])\n course_dict['blocks'] = build_course_map(data)\n\n filename = '%s' % course_dict['course_id']\n filepath = os.path.join('../input/', filename)\n\n with open(filepath, 'w') as outfile:\n json.dump(course_dict, outfile, indent=4)",
"def apply_dart(self):\n shutil.copyfile(self.env['DART_JS_BOOTSTRAP'], self.outdir.make_node('dart.js').abspath())\n for filetype in ['dartfiles','jsfiles','htmlfiles','cssfiles','otherfiles']:\n files = getattr(self, filetype)\n for f in files:\n if f.is_bld():\n outf = self.outdir.make_node(f.path_from(self.path.get_bld()))\n elif f.is_src():\n outf = self.outdir.make_node(f.path_from(self.path.get_src()))\n else:\n raise Exception(\"I don't know what I'm doing anymore.\")\n self.create_task('copytask',f,outf)",
"def process(filename, exclude_dirs=['unittest','test',]):\n print(\"Generating {}\".format(filename))\n nb = 0\n nb_err = 0\n _main_root = os.path.dirname(filename)\n _VFS = {}\n for _mydir in (\"libs\", \"Lib\"):\n for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):\n #if _root.endswith('lib_migration'):\n _flag=False\n for _exclude in exclude_dirs:\n if _exclude in _root: #_root.endswith(_exclude):\n _flag=True\n continue\n if _flag:\n continue # skip these modules\n if '__pycache__' in _root:\n continue\n nb += 1\n\n for _file in _files:\n _ext = os.path.splitext(_file)[1]\n if _ext not in ('.js', '.py'):\n continue\n nb += 1\n\n with open(os.path.join(_root, _file), \"r\") as file_with_data:\n _data = file_with_data.read()\n \n if len(_data) == 0:\n print('no data for %s' % _file)\n _data = unicode('')\n print(_data, type(_data))\n else:\n _data = _data.decode('utf-8')\n\n if _ext in '.js':\n if js_minify is not None:\n try:\n _data = js_minify(_data)\n except Exception as error:\n print(error)\n elif _ext == '.py' and len(_data) > 0:\n try:\n _data = pyminifier.remove_comments_and_docstrings(_data)\n _data = pyminifier.dedent(_data)\n except Exception as error:\n print(error)\n nb_err += 1\n\n _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')\n _vfs_filename = _vfs_filename.replace(\"\\\\\", \"/\")\n\n if _vfs_filename.startswith('/libs/crypto_js/rollups/'):\n if _file not in ('md5.js', 'sha1.js', 'sha3.js',\n 'sha224.js', 'sha384.js', 'sha512.js'):\n continue\n\n mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')\n mod_name, ext = os.path.splitext(mod_name)\n is_package = mod_name.endswith('__init__')\n if is_package:\n mod_name = mod_name[:-9]\n _VFS[mod_name] = [ext, _data, 1]\n else:\n _VFS[mod_name] = [ext, _data]\n print((\"adding %s %s\" % (mod_name, _vfs_filename)))\n print('%s files, %s errors' % (nb, nb_err))\n with open(filename, \"w\") as file_to_write_VFS:\n file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\\n')\n file_to_write_VFS.write('__BRYTHON__.VFS=%s;\\n\\n' % json.dumps(_VFS))"
] | [
"0.608436",
"0.6008992",
"0.59380484",
"0.5935062",
"0.5781285",
"0.5768037",
"0.5759169",
"0.57256293",
"0.57182896",
"0.5655328",
"0.5653805",
"0.56392854",
"0.5626525",
"0.55395234",
"0.5531216",
"0.5497599",
"0.5481104",
"0.54802907",
"0.5461012",
"0.5456868",
"0.5416904",
"0.5410798",
"0.5381749",
"0.5353524",
"0.53343827",
"0.5326129",
"0.53258646",
"0.5319313",
"0.53175837",
"0.5280997"
] | 0.73920244 | 0 |
Run the webp converter on all of the png files. | def generate_webp_textures():
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n argvs = sys.argv\n argc = len(argvs)\n if argc == 1:\n print('usage: convert2png.py <path/to/*.ppm> ...')\n sys.exit(1)\n\n os.makedirs('result/convert2png', exist_ok=True)\n\n for i in range(1, argc):\n img = cv2.imread(argvs[i])\n\n # root, ext = os.path.splitext(argvs[i])\n # cv2.imwrite(root + '.png', img)\n\n root, ext = os.path.splitext(argvs[i])\n strImgName = root.split('/')[-1]\n cv2.imwrite('result/convert2png/' + strImgName + '.png', img)",
"def convert_png_image_to_webp(png, out, quality=80):\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)",
"def photo2web_process_hattenbach():\n\n os.chdir('/Volumes/SSD External/Hattenbach_v2')\n \n dir_base = os.getcwd()\n \n dir_p2w = '/Users/throop/photos/Trips/'\n \n dirs = sorted(glob.glob(os.path.join(dir_base, '*')))\n \n quality_out = '60'\n size_out = '2000x2000'\n \n for i,dir in enumerate(dirs):\n if os.path.isdir(dir):\n os.chdir(dir)\n dir_originals = os.path.join(dir, 'originals')\n dir_originals_fullres = os.path.join(dir, 'originals_fullres')\n\n# For HH files, copy the 'actual' originals into a 'fullres' folder, for safekeeping\n\n if not os.path.isdir(dir_originals_fullres):\n os.rename(dir_originals, dir_originals_fullres)\n os.mkdir(dir_originals)\n \n files = glob.glob(os.path.join(dir_originals_fullres, '*'))\n\n# Get a list of all the images\n\n# For each image, make a low-res, low-quality image. This is just because the scanned files\n# are huge and high-quality, and not useful for online. They are much larger than necessary. \n# So we use 'convert' to shrink them in size and quality, and put the output into 'originals' directory \n# for photo2web.\n\n for file in files:\n file_short = os.path.basename(file)\n file_in = os.path.join(dir_originals_fullres,file_short)\n file_out = os.path.join(dir_originals,file_short)\n if not os.path.isfile(file_out):\n cmd = (f'convert -resize {size_out} -quality {quality_out}' +\n f' {file_in}' +\n f' {file_out}')\n print(f'{cmd}')\n \n subprocess.run(['convert', '-resize', size_out, '-quality', quality_out,\n file_in,\n file_out])\n\n# Now, finally, go thru and do photo2web on all of them.\n \n print(f'\\nProcessing directory {i}/{len(dirs)} {dir}\\n')\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'header.txt'), '.'])\n subprocess.run(['cp', '-r', os.path.join(dir_p2w, 'photos.css'), '.'])\n if not os.path.exists('captions.txt'):\n subprocess.run(['captions_photo2web']) \n subprocess.run(['photo2web_old'])\n subprocess.run(['photo2web'])",
"def convert_to_web(base_path, files):\n for i, f in enumerate(files):\n imagetype = get_imagetype_from_filename(f)\n cmd = ('rsync '\n '{base_path}/qc/phantom/{imagetype}/{f} '\n '{base_path}/website/assets/{output}'.format(\n base_path=base_path, imagetype=imagetype, \n f=f, output=f[9:]))\n os.system(cmd)",
"def convert_pdf_to_images(self, inputpath, outputpath, widget):\n tmp_jpeg_folder = t.tmp_folder(inputpath, hash=True, delete=True)\n tmp_folder = t.tmp_folder(outputpath, hash=True, delete=True)\n\n image_list = []\n\n poppler_path = self.get_poppler_path()\n widget.status_label.setText('EXTRACTING')\n if self.pdf_threads.isChecked():\n rv = self.decide_pages_per_cpu(inputpath)\n if rv:\n image_list = convert_files_to_jpeg(\n rv, inputpath, tmp_jpeg_folder, poppler_path)\n\n if not image_list:\n image_list = pdf_to_jpeg((inputpath, tmp_jpeg_folder, None, None, None, poppler_path,))\n\n if not image_list:\n return False\n\n jobs = []\n\n for count, jpeg_image_path in enumerate(image_list):\n filename = t.zero_prefiller(count, lenght=5)\n webp_save_path = f'{tmp_folder}/{filename}.webp'\n webp_save_path = os.path.abspath(os.path.expanduser(webp_save_path))\n\n jobs.append(\n (jpeg_image_path, webp_save_path, outputpath, self.webp_slider.value(), self.check_4k.isChecked(),)\n )\n\n widget.status_label.setText('CONVERTING')\n if not self.wepb_threads.isChecked():\n for i in jobs:\n convert_files_to_webp([i])\n else:\n convert_files_to_webp(jobs)\n\n widget.status_label.setText('RECOMPRESSING')\n rv = recompress_fucntion(outputpath, tmp_folder)\n\n return dict(status=rv, tmp_webp_folder=tmp_folder, tmp_jpeg_folder=tmp_jpeg_folder, outputpath=outputpath)",
"def clean_webp_textures():\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)",
"def img2webp(path):\n file, ext = os.path.splitext(path)\n image = Image.open(path).convert(\"RGBA\")\n image = ImageOps.expand(image, 75)\n image.save(file + \".webp\", \"WEBP\")\n os.remove(path)",
"def image_webp():\n data = resource(\"images/wolf_1.webp\")\n return Response(data, headers={\"Content-Type\": \"image/webp\"})",
"def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")",
"def main():\n try:\n pixid = sys.argv[1]\n except IndexError:\n print('Usage: python pixget.py [pixid] (save_path)')\n exit(1)\n\n # get the path\n if len(sys.argv) > 2:\n path = sys.argv[2]\n else:\n path = '.'\n\n imgInfoPool = []\n if get_image_url(pixid, imgInfoPool):\n exit(1)\n download_image(path, imgInfoPool)",
"def main():\n\n parser = argparse.ArgumentParser(description='codec_compare')\n parser.add_argument('path', metavar='DIR',\n help='path to images folder')\n args = parser.parse_args()\n classpath = args.path\n classname = classpath.split('/')[1]\n\n images = set(listdir_full_path(classpath))\n if len(images) <= 0:\n print \"\\033[91m[ERROR]\\033[0m\" + \" no source files in ./images.\"\n sys.exit(1)\n\n codeclist_full = set(['aom', 'deepcoder', 'deepcoder-lite', 'fuif', 'fvdo', 'hevc', 'kakadu', 'jpeg',\n 'pik', 'tat', 'xavs', 'xavs-fast', 'xavs-median', 'webp'])\n\n bpp_targets = set([0.06, 0.12, 0.25, 0.50, 0.75, 1.00, 1.50, 2.00])\n for image in images:\n width, height, depth = get_dimensions(image, classname)\n name, imgfmt = os.path.splitext(image)\n imgfmt = os.path.basename(image).split(\".\")[-1]\n derivative_images = []\n if classname[:6] == 'classB':\n derivative_images = create_derivatives(image, classname)\n else:\n derivative_images.append((image, imgfmt))\n\n for derivative_image, pix_fmt in derivative_images:\n json_dir = 'metrics'\n mkdir_p(json_dir)\n json_file = os.path.join(json_dir,\n os.path.splitext(os.path.basename(derivative_image))[0] + \".\" + pix_fmt + \".json\")\n # if os.path.isfile(json_file):\n # print \"\\033[92m[JSON OK]\\033[0m \" + json_file\n # continue\n main_dict = dict()\n derivative_image_metrics = dict()\n for codecname in codeclist_full:\n convertflag = 1\n caseflag = pix_fmt\n if (codecname == 'webp' or codecname == 'tat' or 'deepcoder' in codecname) and depth != '8':\n continue\n if 'xavs' in codecname and depth != '8' and depth != '10':\n continue\n if 'classE' in classname and ('tat' in codecname or 'xavs' in codecname or 'deepcoder' in codecname):\n continue\n if codecname == 'kakadu' and classname[:6] == 'classB':\n convertflag = 0\n caseflag = imgfmt\n bpp_target_metrics = dict()\n for bpp_target in bpp_targets:\n print(codecname)\n if codecname == 'aom' and classname[:6] == 'classB':\n # ('AERIAL2' in image or 'CATS' in image or 'XRAY' in image or 'GOLD' in image or 'TEXTURE1' in image):\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + 'av1'\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'kakadu' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif 'xavs' in codecname and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'fvdo' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_pgm' + '.' 
+ codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.pgm')\n original_image = image\n else:\n if codecname == 'fuif' and 'tif' in imgfmt:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '.tif_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n elif codecname == 'webp' or codecname == 'tat':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_yuv420p.' + codecname\n else:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image_path = os.path.join('outputs', codecname, 'decoded')\n decoded_image = ''\n for decodedfile in os.listdir(decoded_image_path):\n encoderoot = '_'.join(os.path.splitext(os.path.basename(encoded_image_name))[0].split('_')[:-1])\n if encoderoot in decodedfile:\n if ('tat' in codecname or 'webp' in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] == '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n print(decoded_image)\n if ('tat' not in codecname or 'webp' not in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] != '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n if 'classE' not in classname and 'classB' not in classname and os.path.isfile(decoded_image):\n decoded_image = convert_decoded(decoded_image, width, height, depth, codecname)\n original_image = convert_decoded(derivative_image, width, height, depth, 'reference')\n else:\n original_image = derivative_image\n\n print('Reference:' + original_image)\n print('Encoded:' + encoded_image)\n print('Decoded:' + decoded_image)\n if (os.path.isfile(original_image) and os.path.isfile(decoded_image) and os.path.isfile(encoded_image)):\n if 'classE' in classname:\n metrics = compute_metrics_HDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width, height, pix_fmt, depth)\n\n elif 'classB' in classname:\n metrics = compute_metrics(original_image, decoded_image, encoded_image, bpp_target, codecname,\n width, height, pix_fmt)\n else:\n metrics = compute_metrics_SDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width,\n height, imgfmt, depth)\n measured_bpp = (os.path.getsize(encoded_image) * 1.024 * 8) / (float((int(width) * int(height))))\n bpp_target_metrics[measured_bpp] = metrics\n else:\n continue\n \n derivative_image_metrics[codecname] = bpp_target_metrics\n main_dict[derivative_image] = derivative_image_metrics\n\n mkdir_p(json_dir)\n with open(json_file, 'w') as f:\n f.write(json.dumps(main_dict, indent=2))",
"def main():\n folder = \"D:\\\\Noam10\\\\Documents\\\\Documents\\\\dither 2\"\n filename = \"kirigiri\"\n filetype = \".jpg\"\n input_file = folder + \"\\\\\" + filename + filetype\n for palette in paletteDict.keys():\n output_file = folder + \"\\\\\" + filename + \"(\" + palette + \").bmp\"\n Dither(input_file, output=output_file, palette=paletteDict[palette])\n print(output_file)",
"def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)",
"def JPGtoPNGConverter(source, dest):\n files = os.listdir(f\"./{source}\")\n if not os.path.exists(f\"./{dest}\"):os.makedirs(f\"./{dest}\")\n\n for file in files:\n if os.path.splitext(file)[-1] == \".jpg\":\n img = Image.open(f\"./{source}/{file}\")\n clean_text = os.path.splitext(file)[0]\n img.save(f\"./{dest}/{clean_text}.png\",\"png\")\n else:\n print(f\"Your filename: {file} is not in .JPG format !!\")\n return \"All files converted successfully :) \"",
"def main():\n print(\"For each image, type the new name of the file.\" +\n \" No extension necessary!\", end=\"\\n\\n\")\n file_list = input_path.glob(f\"*.{args.ext}\")\n plt.ion()\n\n for pic in file_list:\n img = io.imread(str(pic))\n img = rescale(img, 0.25)\n img = rotate(img, 90, resize = True)\n plt.draw()\n plt.pause(0.001)\n if args.vinyl:\n new_name = get_vinyl_name()\n else:\n print(\"\\n\")\n new_name = input(\n \"Please enter a new filename. Press [enter] to skip: \")\n if new_name:\n if not new_name.endswith(args.ext):\n new_name += \".\" + args.ext\n # io.imsave(output_path / new_name, img)\n shutil.copyfile(pic, output_path / new_name)\n if args.replace:\n os.remove(pic)",
"def buildImages(files, targets, type):\n images = []\n for file in files:\n targets.append(file)\n with open(file, \"rb\") as f:\n if type == \"Byte\":\n images.append(bytePlot(list(f.read())))\n elif type == \"Markov\":\n images.append(markovPlot(list(f.read())))\n elif type == \"Hilbert\":\n images.append(hilbertPlot(list(f.read())))\n smp.imsave(\"{}.png\".format(file), images[-1])\n return images, targets",
"def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n img_urls = read_urls(parsed_args.logfile)\n\n if parsed_args.todir:\n download_images(img_urls, parsed_args.todir)\n else:\n print('\\n'.join(img_urls))",
"def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n img_urls = read_urls(parsed_args.logfile)\n\n if parsed_args.todir:\n download_images(img_urls, parsed_args.todir)\n else:\n print('\\n'.join(img_urls))",
"def convert_gif(ctx):\n ctx.run(\n 'ffmpeg '\n '-i resources/demo.mkv -filter_complex \"[0:v] palettegen\" '\n 'resources/palette.png',\n pty=True\n )\n ctx.run(\n 'ffmpeg -i resources/demo.mkv '\n '-i resources/palette.png '\n '-filter_complex \"[0:v][1:v] paletteuse\" '\n 'resources/demo.gif',\n pty=True\n )",
"def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)",
"def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n img_urls = read_urls(parsed_args.logfile)\n if parsed_args.todir:\n download_images(img_urls, parsed_args.todir)\n else:\n print('\\n'.join(img_urls))",
"def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()",
"def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break",
"def test_basic_run_png(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200), mime_type='PNG')\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n self.expect_encode_image('SomeImageInPng',\n images_service_pb.OutputSettings.PNG)\n self.run_request('image/png', 'SomeImageInPng')",
"def main():\n if not Path(EXPORT_DIR).exists():\n print(\"Kindle is not connected.\", file=sys.stderr)\n sys.exit(1)\n\n type_zip = ('zip file', '*.zip')\n type_pdf = ('pdf file', '*.pdf')\n\n files = filedialog.askopenfiles(filetypes=[type_zip, type_pdf], initialdir=os.path.expanduser('~'))\n for f in files:\n export_path = Path(EXPORT_DIR) / f\"{Path(f.name).stem}.pdf\"\n if f.name.endswith('.zip'):\n with open(export_path, 'wb') as pdf, zipfile.ZipFile(f.name, 'r') as _zip:\n pdf.write(img2pdf.convert([_zip.open(img) for img in _zip.infolist()]))\n else:\n shutil.copy(f.name, export_path)",
"def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)",
"def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))",
"def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)",
"def main():\n\n # Just grab all files - we'll use try/except to filter\n images = glob.glob(os.path.join(args.input_dir, '*.*'))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for img_file in images:\n print(img_file)\n try:\n np_img = plt.imread(img_file)\n print(np_img.shape)\n img_name = img_file.split(os.sep)[-1]\n new_img_file = os.path.join(args.output_dir, img_name)\n pad_image(np_img, new_img_file)\n except Exception as e:\n print('Warning: {}. Skpping file.'.format(e))\n continue",
"def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)"
] | [
"0.6570944",
"0.64156437",
"0.61769783",
"0.5975264",
"0.5974723",
"0.5941165",
"0.59038806",
"0.5838966",
"0.58314997",
"0.5811597",
"0.5809414",
"0.5798533",
"0.5736212",
"0.56903666",
"0.56399405",
"0.56066835",
"0.557636",
"0.557636",
"0.5565635",
"0.5556461",
"0.55448854",
"0.55334276",
"0.5527023",
"0.5442887",
"0.544092",
"0.54306245",
"0.5419744",
"0.5414164",
"0.54075223",
"0.5405304"
] | 0.70537895 | 0 |
Delete all the processed webp textures. | def clean_webp_textures():
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()",
"def delete(self):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.delete()\n\t\telse:\n\t\t\tsuper( textureFile, self ).delete()",
"def destroy(self):\n\n self.cmapTexture.destroy()\n\n for tex in (self.modulateTexture,\n self.clipTexture,\n self.colourTexture):\n tex.deregister(self.name)\n glresources.delete(tex.getTextureName())\n\n self.removeListeners()\n self.deregisterAuxImage('modulate')\n self.deregisterAuxImage('clip')\n self.deregisterAuxImage('colour')\n\n self.modulateTexture = None\n self.clipTexture = None\n self.colourTexture = None\n self.modulateImage = None\n self.clipImage = None\n self.colourImage = None\n self.modulateOpts = None\n self.clipOpts = None\n self.colourOpts = None\n\n glimageobject.GLImageObject.destroy(self)",
"def delete_AllImgs(self):\n self.listImages.remove_all_imgs()",
"def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()",
"def clearAllPictures(self):\n shutil.rmtree(PNG_OUTPUT_PATH)\n os.makedirs(PNG_OUTPUT_PATH)",
"def destroyTempFrames(self):\n for frame in self.tempFrames:\n frame.destroy()\n self.tempFrames = []",
"def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')",
"def cleanup(self):\n self.GP.cleanup()",
"def teardown():\n for filename in files_to_delete:\n delete_file(filename)",
"def __del__(self):\n for filename in self.files:\n unlink(filename)",
"def unloadallskills(self):\r\n for skill in skills.skills.copy():\r\n es.unload(\"%s/skills/%s\" % (info.basename, skill))\r\n skills.clearList()",
"def removeTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfor sha in shas:\n\t\t\tif sha.a.texture_Occ.exists:\n\t\t\t\tsha.a.texture_Occ.delete()",
"def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)",
"def clear_images(self):\r\n\r\n with translate_errors():\r\n self.audio.clear_pictures()\r\n self.audio.save()\r\n\r\n super().clear_images()",
"def _cleanup():\n if os.path.exists(WEBPROPERTIES_PATH):\n os.remove(WEBPROPERTIES_PATH)\n if os.path.exists(PROFILES_PATH):\n os.remove(PROFILES_PATH)",
"def remove_images(self):\n hardware_components.log_method(self, \"remove_images\")\n communication_object = self._get_control_software().connection\n communication_object.remove_all()",
"def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()",
"def cleanup(self):\n\t\tself.loader.cleanup()\n\t\tself.Loaded = False",
"def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)",
"def cleanupResources():\n None",
"def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)",
"def __del__(self):\r\n train_data_sources = list(self._train_data.values())\r\n test_data_sources = list(self._test_data.values())\r\n all_data_sources = train_data_sources + test_data_sources\r\n for data_source in all_data_sources:\r\n data_source.cleanup()\r\n self._tester.__del__()",
"def cleanup(self):\r\n for action in self._actions:\r\n action.cleanup()",
"def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()",
"def clear_cache(self):\n for fle in self.cache_location.glob(\"*.pickle\"):\n fle.unlink()",
"def unload(self):\n for obj in self.objects:\n self.scene.removeItem(obj)\n for plant in self.plants:\n self.scene.removeItem(plant)\n for tile in self.tiles:\n tile.unload()\n self.scene.removeItem(tile)\n if self.region_back:\n self.scene.removeItem(self.region_back)\n self.tiles = []\n self.objects = []\n self.plants = []\n self.region_back = None\n self.loaded = False",
"def __del__(self):\n try:\n self._frame._destroy()\n except:\n pass\n self._turtles = []\n self._pencils = []\n del self._frame",
"def clear(self):\n\n self.wads = []\n\n self.sprites = {}\n self.sprite_image_cache = {}\n self.palette = None\n\n self.sound_cache = {}",
"def erase_captured_urls(url_list):\n if gs.local:\n erase_captured_urls_local(url_list)\n else:\n erase_captured_urls_aws(url_list)"
] | [
"0.70913655",
"0.6556138",
"0.6525104",
"0.6477908",
"0.6368207",
"0.6253116",
"0.62028897",
"0.6191404",
"0.6172181",
"0.6160886",
"0.6124062",
"0.6107779",
"0.60488415",
"0.60330206",
"0.6033018",
"0.6028536",
"0.6020569",
"0.6011445",
"0.5983217",
"0.590569",
"0.5905648",
"0.58951694",
"0.58934015",
"0.5878972",
"0.5870694",
"0.5866326",
"0.5863111",
"0.5862527",
"0.5858258",
"0.58563614"
] | 0.8128452 | 0 |
Delete all the processed flatbuffer binaries. | def clean_flatbuffer_binaries():
for element in FLATBUFFERS_CONVERSION_DATA:
for json in element.input_files:
path = processed_json_path(json)
if os.path.isfile(path):
os.remove(path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()",
"def clean(self):\n # Delete vertices / faces / colors / normals :\n self._vert_buffer.delete()\n self._index_buffer.delete()\n self._normals_buffer.delete()\n self._xrange_buffer.delete()\n self._math_buffer.delete()",
"def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()",
"async def clear_all(self) -> None:",
"def clean_data_fragments(self) -> None:\n read_path: Path = Path(os.environ[\"DATA_PATH\"]) / \"fragments\"\n try:\n shutil.rmtree(read_path / \"__MACOSX\")\n except FileNotFoundError:\n print('Folder \"__MACOSX\" already removed.')\n\n # delete non-binarized images\n frag_paths: list = list((read_path / \"image-data\").iterdir())\n frags_binarized: list = [fp for fp in frag_paths if \"binarized\" in fp.name]\n frags_delete: set = set(frag_paths).difference(set(frags_binarized))\n for frag in frags_delete:\n frag.unlink()\n frag_paths = frags_binarized\n for frag_path in frag_paths:\n # Python 3.8 hack, seems to be supported without str() on 3.9\n shutil.move(str(frag_path.resolve()), str(read_path.resolve()))\n\n (read_path / \"image-data\").rmdir() # delete empty folder",
"def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)",
"def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()",
"def clean(cls, pdb_object):\n if not cls.computed(pdb_object):\n return\n for successor in cls.successors:\n successor.clean(pdb_object)\n pdb_object.uncomplete(cls.name)\n for file in cls.files(pdb_object):\n file.delete()",
"def _clean_bins():\n rmtree(LIBS_DIR)\n rmtree(BINS_DIR)\n rmtree(HEADERS_DIR)",
"def clean():\n clean_files()",
"def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()",
"def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])",
"def clean():\n os.system('killall -9 lnd')\n os.system('killall -9 btcd')\n \n shutil.rmtree(btcd_dir)\n os.remove(btcd_log)\n\n index = 0\n while True:\n node = Node.from_index(index)\n try:\n shutil.rmtree(node.path())\n os.remove(node.log())\n except:\n click.echo(f'removed {index} nodes.')\n break\n index += 1",
"def deleteAllFiles(self, flush=True): \n \n if flush: \n self.flush(False) \n \n for filePath in self.filePathDict.keys(): \n if self.outDir is None: \n fullPath = filePath \n else: \n fullPath = os.path.join(self.outDir,filePath) \n \n if os.path.exists(fullPath): \n os.remove(fullPath)",
"def delete_all():\n if os.path.exists(DATA_DIR):\n shutil.rmtree(DATA_DIR)",
"def clean(self):\n if self.options.format != 'svg':\n for svgfile in self.svgouts.itervalues():\n os.remove(svgfile)\n os.rmdir(self.tmpdir)",
"def delete_all(self, prog:progress=None): \n\t\tself.__output_status('Delete all files')\n\t\tif (self.__check_terminated()):\n\t\t\treturn;\t\n\t\tdelete_dir(self.root)\n\t\ttime.sleep(0.3)",
"def delete_all(self):\n # delete everything\n shutil.rmtree(self.location)",
"def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()",
"def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))",
"def delete_b_files(intermediate_files: List[File]) -> None:\n for f in intermediate_files:\n f.remove()",
"def clear_client_outputs():\n directory = client_variables.output_zip_folder\n for name in glob.glob(directory + '\\\\*'):\n os.remove(name)",
"def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()",
"def _cleanUp(self):\r\n limit = datetime.now() - timedelta(seconds=self._timeout)\r\n\r\n toClean = [msg for msg in self._incompleteMsgs if msg.older(limit)]\r\n\r\n if toClean:\r\n for msg in toClean:\r\n self._incompleteMsgs.remove(msg)\r\n\r\n log.msg('{0} incomplete messages have been dropped '\r\n 'from assembler.'.format(len(toClean)))\r\n\r\n toClean = [uri for uri, (_, timestamp) in self._binaries.iteritems()\r\n if timestamp < limit]\r\n\r\n if toClean:\r\n for uri in toClean:\r\n del self._binaries[uri]\r\n\r\n log.msg('{0} unused binaries have been dropped '\r\n 'from assembler.'.format(len(toClean)))",
"def clean(self):\n\t\tself.archiver.closeFile()",
"def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)",
"def delete():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # The interatomic data.\n for interatom in interatomic_loop():\n # The data.\n if hasattr(interatom, 'j_coupling'):\n del interatom.j_coupling\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n del interatom.j_coupling_err",
"def clear_batch(self):\n self._batch_idx = 0\n self.variant_states = None\n self.object_specs = None\n self.object_attribute_values = None",
"def free(self):\n for device_buffer in self.device_buffers.values():\n device_buffer.free()",
"def PurgeAll(self):\n\t\tself.acad.ActiveDocument.PurgeAll()"
] | [
"0.7080938",
"0.6617103",
"0.64717567",
"0.6464323",
"0.6414247",
"0.6351224",
"0.6346113",
"0.6323048",
"0.6296938",
"0.62765425",
"0.6252906",
"0.6217102",
"0.61619747",
"0.61525744",
"0.6143297",
"0.6128095",
"0.6123065",
"0.6101668",
"0.60976034",
"0.6087722",
"0.6086085",
"0.6081509",
"0.6037965",
"0.60286456",
"0.60147667",
"0.60073555",
"0.59967035",
"0.59785587",
"0.5977556",
"0.5951491"
] | 0.7819535 | 0 |
Prints an error message to stderr for BuildErrors. | def handle_build_error(error):
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (
' '.join(error.argv), str(error.error_code))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(message):\n print(message, file=sys.stderr)",
"def perror(message):\n print(message, file=sys.stderr)",
"def error(*args, **kwargs): # pragma: nocover\n kwargs['file'] = sys.stderr\n print(\"\\n\\tERROR:\", *args, **kwargs)\n if args and args[0].startswith(\"[Errno 2] No such file or directory\"):\n print(\"\\t(Did you forget to include an __init__.py?)\")\n sys.exit(1)",
"def error(msg):\n print(msg, file=sys.stderr)\n sys.exit()",
"def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))",
"def msg_err(message):\n to_stdout(\" !!! {message}\".format(message=message), colorf=red, bold=True)\n if _logger:\n _logger.error(message)",
"def print_err(msg):\n print(msg, file=sys.stderr)",
"def error(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[ERR] {0}'.format(message), 'red', file=sys.stderr)",
"def _print_error(message):\n sys.stderr.write(str(message) + \"\\n\")\n sys.stderr.flush()",
"def error(message):\n if DEBUG:\n with print_lock:\n print((Colours.FAIL + 'ERROR: ' + Colours.END_COLOUR + message).strip())",
"def print_error(message):\n from sys import stderr\n print(\"\\033[1;31;40m \" + message + \"\\033[0;37;40m\", file=stderr)",
"def printerr(msg):\n print(msg, file=sys.stderr)",
"def errprint(msg):\n\n print('!! *** ERROR: %s' % msg)",
"def err(message: str) -> None:\n filename, line = filename_line()\n\n with State.lock:\n State.stderr.write(err_as_text(filename=filename, line=line, message=message))\n State.stderr.flush()",
"def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))",
"def print_error_message(message):\r\n return print('ERROR:',message)",
"def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))",
"def eprint(errmsg):\n print(errmsg, file=STDERR)",
"def _print_error(msg):\n sys.stderr.write(msg + '\\n')\n LOG.error(msg)",
"def print_std_err(str_):\n print(str_, file=sys.stderr)",
"def to_stderr(message):\n print >> sys.stderr, message",
"def log_err(msg):\n msg = 'ERROR: {0}\\n'.format(msg)\n sys.stderr.write(msg)",
"def error(self, msg, stderr=True):\n self.log(msg, level=self.ERROR, stderr=stderr)",
"def printerr(message):\n sys.stderr.write('{}\\n'.format(message))\n sys.stderr.flush()",
"def print_error(msg):\n print(\"[{}] {}\".format(datetime.now(), msg), file=sys.stderr)",
"def print_failure_message(message):\n try:\n import colorama\n print(colorama.Fore.RED + message + colorama.Fore.RESET,\n file=sys.stderr)\n except ImportError:\n print(message, file=sys.stderr)",
"def err(message):\n\n timestamp = format_time(get_time())\n message = '{} - [ERROR] - {}'.format(timestamp, message)\n _log_status(message)",
"def stderr(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n text = dashboard.get_stderr(wf_id, job_id, job_instance_id)\n\n if text.stderr_text == None:\n return 'No Standard error for workflow ' + wf_id + ' job-id ' + job_id\n else:\n return '<pre>%s</pre>' % utils.unquote(text.stderr_text)",
"def err(*message, **kwargs):\n print(*message, file=sys.stderr, **kwargs)",
"def print_to_stderr(msg):\n sys.stderr.write(msg)"
] | [
"0.70509905",
"0.6726131",
"0.6711041",
"0.6696196",
"0.6692284",
"0.6691786",
"0.6670899",
"0.6622568",
"0.65966165",
"0.65933764",
"0.65801626",
"0.65647084",
"0.65214205",
"0.65149754",
"0.6462072",
"0.6449125",
"0.6441902",
"0.64412487",
"0.64282566",
"0.6401715",
"0.63785666",
"0.6363826",
"0.6332405",
"0.6332159",
"0.6331356",
"0.633067",
"0.6323632",
"0.63164556",
"0.62567914",
"0.6255598"
] | 0.7691449 | 0 |
Plots the color mapping together with the fixed points. Creates a movie file. | def tracer_movie(datadir = 'data/', tracerFile = 'tracers.dat',
fixedFile = 'fixed_points.dat', zlim = [],
head_size = 3, hm = 1,
imageDir = './', movieFile = 'fixed_points.mpg',
fps = 5.0, bitrate = 1800):
    import pylab as plt
    import numpy as np
    import os
    # NB: assumes the pencil-code python module is importable as 'pencil'
    import pencil as pc
# read the mapping and the fixed point positions
tracers, mapping, t = pc.read_tracers(datadir = datadir, fileName = tracerFile, zlim = zlim, head_size = head_size)
fixed = pc.read_fixed_points(datadir = datadir, fileName = fixedFile, hm = hm)
# read the parameters for the domain boundaries
params = pc.read_param(quiet = True)
domain = [params.xyz0[0], params.xyz1[0], params.xyz0[1], params.xyz1[1]]
    # determine how much faster the fixed points have been written out than the color mapping
advance = np.ceil(float(len(fixed.t))/len(mapping[0,0,:,0]))
# determine the colors for the fixed points
colors = np.zeros(np.shape(fixed.q) + (3,))
colors[:,:,:] = 0.
print(np.shape(colors))
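    # color scheme: each fixed point is shaded from white toward red for positive q
    # and from white toward blue for negative q, normalized by the maximum |q| that
    # point reaches over time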
for j in range(len(colors[:,0,0])):
for k in range(len(colors[0,:,0])):
if fixed.q[j,k] >= 0:
colors[j,k,1] = colors[j,k,2] = (1-fixed.q[j,k]/np.max(np.abs(fixed.q[:,k])))
colors[j,k,0] = fixed.q[j,k]/np.max(np.abs(fixed.q[:,k]))
else:
colors[j,k,0] = colors[j,k,1] = (1+fixed.q[j,k]/np.max(np.abs(fixed.q[:,k])))
colors[j,k,2] = -fixed.q[j,k]/np.max(np.abs(fixed.q[:,k]))
# prepare the plot
width = 6
height = 6
plt.rc("figure.subplot", left=(60/72.27)/width)
plt.rc("figure.subplot", right=(width-20/72.27)/width)
plt.rc("figure.subplot", bottom=(50/72.27)/height)
plt.rc("figure.subplot", top=(height-20/72.27)/height)
figure = plt.figure(figsize=(width, height))
for k in range(len(fixed.x[0,:])):
dots = plt.plot(fixed.x[0,k], fixed.y[0,k], 'o', c = colors[0,k,:])
    image = plt.imshow(np.swapaxes(mapping[:,::-1,0,:], 0, 1), interpolation = 'nearest', extent = domain)  # transpose x/y for imshow (Python 3-safe replacement for zip(*...))
j = 0
frameName = imageDir + 'images%06d.png'%j
imageFiles = []
imageFiles.append(frameName)
figure.savefig(frameName)
for j in range(1,len(fixed.t)):
#time.sleep(0.5)
figure.clear()
for k in range(len(fixed.x[j,:])):
dots = plt.plot(fixed.x[j,k], fixed.y[j,k], 'o', c = colors[j,k,:])
        image = plt.imshow(np.swapaxes(mapping[:,::-1,int(np.floor(j/advance)),:], 0, 1), interpolation = 'nearest', extent = domain)
frameName = imageDir + 'images%06d.png'%j
imageFiles.append(frameName)
figure.savefig(frameName)
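    # NOTE: the step below shells out to mencoder, which must be installed and on the PATH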
    # convert the images into an mpg file
    mencodeCommand = "mencoder 'mf://"+imageDir+"images*.png' -mf type=png:fps="+str(fps)+" -ovc lavc -lavcopts vcodec=mpeg4:vhq:vbitrate="+str(bitrate)+" -ffourcc MP4S -oac copy -o "+movieFile
os.system(mencodeCommand)
# remove the image files
for fname in imageFiles:
os.remove(fname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show(self, view = None, save = False, savename = None, cmap = None):\n\n # define the style\n if cmap == None:\n style = PlotStyle(cmap_name = 'macplus')\n else:\n style = PlotStyle(cmap_name = cmap)\n \n # default is skymap\n if view == None:\n view = self._view_options[0]\n else:\n if view not in self._view_options:\n print ('ERROR:', 'view option', view, 'is not defined')\n return\n\n # sky map\n if view == self._view_options[0]:\n\n # figure\n fig = plt.figure(figsize = (12, 6))\n ax = plt.gca()\n \n # skymap\n skymap = AllSkyMap(projection = 'hammer', lon_0 = 0, lat_0 = 0)\n\n \n # define RA and DEC over all coordinates\n rightascensions = np.linspace(-np.pi, np.pi, self.num_points)\n declinations = self.declination\n \n cmap = style.cmap\n norm_proj = matplotlib.colors.Normalize(self.exposure_factor.min(),\n self.exposure_factor.max())\n\n # plot the exposure map\n # NB: use scatter as plot and pcolormesh have bugs in shiftdata methods\n for dec, proj in np.nditer([declinations, self.exposure_factor]):\n decs = np.tile(dec, self.num_points)\n c = SkyCoord(ra = rightascensions * u.rad, \n dec = decs * u.rad, frame = 'icrs')\n lon = c.galactic.l.deg\n lat = c.galactic.b.deg\n skymap.scatter(lon, lat, latlon = True, linewidth = 3, \n color = cmap(norm_proj(proj)), alpha = 0.7)\n\n # plot exposure boundary\n self.draw_exposure_lim(skymap)\n \n # add labels\n skymap.draw_standard_labels(style.cmap, style.textcolor)\n\n # add colorbar\n self._exposure_colorbar(style)\n\n # decplot\n elif view == self._view_options[1]:\n\n # plot for all decs\n \n plt.figure()\n plt.plot(self.declination, self.exposure_factor, linewidth = 5, alpha = 0.7)\n plt.xlabel('$\\delta$');\n plt.ylabel('m($\\delta$)');\n\n\n if save:\n plt.savefig(savename, dpi = 1000,\n bbox_inches = 'tight', pad_inches = 0.5)",
"def plotmap(self):\n if self.plotfigure is None: return\n\n self.plotfigure.clf()\n collist = [\"#%.2x%.2x%.2x\" % (i, i, i) for i in self.currentshades]\n cmap = colors.ListedColormap(collist)\n if self.gs.isfixed:\n crange = [self.minvalue] + self.currentvalues\n elif self.gs.isperc:\n crange = np.percentile(self.imagearray, [0.0] + self.currentpercents)\n else:\n crange = np.array([self.minstdd] + self.currentnsigs) * self.stdvalue + self.meanvalue\n norm = colors.BoundaryNorm(crange, cmap.N)\n img = plt.imshow(self.imagearray, cmap=cmap, norm=norm, origin='lower')\n plt.colorbar(img, norm=norm, cmap=cmap, boundaries=crange, ticks=crange)\n if self.imagetitle is not None:\n plt.title(self.imagetitle)",
"def _plot_map(self):\n\n # Plot points if they exist\n\n if len(self._laserX) > 0:\n self._plot_laser()\n\n if len(self._goalX) > 0:\n self._plot_goal()\n\n if len(self._summitX) > 0:\n self._plot_summit()\n\n self._plot_objects()\n\n # Update Plot\n self._fig.canvas.draw_idle()\n\n plt.pause(0.01)",
"def showGlobalPlot(self,fsize=[14,14],cmap='jet',m=None,figname='fig'):\n\n fig=plt.figure(figsize=(14,14))\n # read in data to use for plotted points\n\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n\n lat = (B[:,0]+B[:,1])/2\n lon = (B[:,2]+B[:,3])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n\n val = np.array(A)\n\n\n # determine range to print based on min, max lat and lon of the data\n margin = 2 # buffer to add to the range\n lat_min = min(lat) - margin\n lat_max = max(lat) + margin\n lon_min = min(lon) - margin\n lon_max = max(lon) + margin\n\n # create map using BASEMAP\n if m is None:\n m = Basemap(llcrnrlon=lon_min,\n llcrnrlat=lat_min,\n urcrnrlon=lon_max,\n urcrnrlat=lat_max,\n lat_0=(lat_max - lat_min)/2,\n lon_0=(lon_max-lon_min)/2,\n projection='merc',\n resolution = 'h',\n area_thresh=10000.,\n )\n m.drawcoastlines()\n m.drawcountries()\n m.drawstates()\n m.drawmapboundary(fill_color='#acbcec')\n m.fillcontinents(color = 'k',lake_color='#acbcec')\n\n # convert lat and lon to map projection coordinates\n lons, lats = m(lon, lat)\n # plot points as red dots\n m.scatter(lons, lats,s=val+1, c=val, cmap=cmap,\n norm=colors.LogNorm(vmin=np.min(val)+1, vmax=np.max(val)+1),\n zorder=5)\n\n plt.savefig(figname+'.pdf',dpi=300,bbox_inches='tight',transparent=True)\n\n return m",
"def plot_map(priors):\n sns.set_style(\"white\")\n\n cmap=sns.cubehelix_palette(8, start=.5, rot=-.75,as_cmap=True)\n hdulists=list(map(lambda prior:postmaps.make_fits_image(prior,prior.sim), priors))\n fig = plt.figure(figsize=(10*len(priors),10))\n figs=[]\n for i in range(0,len(priors)):\n figs.append(aplpy.FITSFigure(hdulists[i][1],figure=fig,subplot=(1,len(priors),i+1)))\n\n for i in range(0,len(priors)):\n vmin=np.min(priors[i].sim)\n vmax=np.max(priors[i].sim)\n figs[i].show_colorscale(vmin=vmin,vmax=vmax,cmap=cmap)\n figs[i].show_markers(priors[i].sra, priors[i].sdec, edgecolor='black', facecolor='black',\n marker='o', s=20, alpha=0.5)\n figs[i].tick_labels.set_xformat('dd.dd')\n figs[i].tick_labels.set_yformat('dd.dd')\n figs[i].add_colorbar()\n figs[i].colorbar.set_location('top')\n return figs,fig",
"def xz_movie(Feaff, Fe, Fi, muVn, X, Z, length,\n fps=10, path='results/movies/', title='output'):\n\n def colorbar_format(x, pos):\n a = '{:.3f}'.format(x)\n return format(a)\n\n fig, axs = plt.subplots(2, 2, figsize=(8, 8))\n axs[0, 0].set_title('$\\\\nu_e^{aff}$')\n axs[0, 0].set(xlabel='X (mm)', ylabel='Z (mm)')\n axs[0, 1].set_title('$\\\\nu_e$')\n axs[0, 1].set(xlabel='X (mm)', ylabel='Z (mm)')\n axs[1, 0].set_title('$\\\\nu_i$')\n axs[1, 0].set(xlabel='X (mm)', ylabel='Z (mm)')\n axs[1, 1].set_title('$\\\\mu_V^{N}$')\n axs[1, 1].set(xlabel='X (mm)', ylabel='Z (mm)')\n\n camera = Camera(fig)\n\n for i in range(0, length, fps):\n cbar0 = axs[0, 0].contourf(X, Z, Feaff[i, :, :].T,\n np.linspace(Feaff.min(), Feaff.max(), 20),\n cmap=cm.viridis)\n cbar1 = axs[0, 1].contourf(X, Z, Fe[i, :, :].T,\n np.linspace(Fe.min(), Fe.max(), 20),\n cmap=cm.viridis)\n cbar2 = axs[1, 0].contourf(X, Z, Fi[i, :, :].T,\n np.linspace(Fi.min(), Fi.max(), 20),\n cmap=cm.viridis)\n cbar3 = axs[1, 1].contourf(X, Z, muVn[i, :, :].T,\n np.linspace(muVn.min(), muVn.max(), 20),\n cmap=cm.viridis)\n camera.snap()\n\n anim = camera.animate()\n\n fig.colorbar(cbar0, ax=axs[0, 0],\n format=ticker.FuncFormatter(colorbar_format))\n fig.colorbar(cbar1, ax=axs[0, 1],\n format=ticker.FuncFormatter(colorbar_format))\n fig.colorbar(cbar2, ax=axs[1, 0],\n format=ticker.FuncFormatter(colorbar_format))\n fig.colorbar(cbar3, ax=axs[1, 1],\n format=ticker.FuncFormatter(colorbar_format))\n\n fig.tight_layout()\n\n # Saving movie\n path = f'{path}{title}.mp4'\n anim.save(path)\n print(f'Movie saved in {path}.')\n\n plt.close(fig)\n\n return",
"def plot_warp(warp_fname, show=False):\n warp_file = np.load(warp_fname)\n outdir = os.path.dirname(warp_fname)\n warp_basename = os.path.splitext(os.path.basename(warp_fname))[0]\n plt_fname = os.path.join(outdir, warp_basename + '.png')\n offsets = warp_file['offs']\n rg_offs = offsets[0, :, :].T\n az_offs = offsets[1, :, :].T\n\n min_rg_off = np.amin(rg_offs) * 0.9\n max_rg_off = np.amax(rg_offs) * 0.9\n min_az_off = np.amin(az_offs) * 0.9\n max_az_off = np.amax(az_offs) * 0.9\n fig, (rg_ax, az_ax) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\n\n im_rg = rg_ax.imshow(rg_offs, vmin=min_rg_off, vmax=max_rg_off)\n rg_ax.set_title('Range offsets (pixels)')\n rg_ax.set_xlabel('Range')\n rg_ax.set_ylabel('Azimuth')\n fig.colorbar(im_rg, ax=rg_ax)\n\n im_az = az_ax.imshow(az_offs, vmin=min_az_off, vmax=max_az_off)\n az_ax.set_title('Azimuth offsets (pixels)')\n az_ax.set_xlabel('Range')\n az_ax.set_ylabel('Azimuth')\n fig.colorbar(im_az, ax=az_ax)\n fig.savefig(plt_fname)\n\n plt.figure()\n plt.imshow(offsets[0, :, :].T)\n plt.title('Range offsets')\n plt.colorbar()\n plt.savefig(os.path.join(outdir, 'range_offs.png'))\n\n plt.figure()\n plt.imshow(offsets[1, :, :].T)\n plt.title('Azimuth offsets')\n plt.colorbar()\n plt.savefig(os.path.join(outdir, 'azimuth_offs.png'))\n\n if show:\n plt.show()",
"def draw_map(data, title, output):\n import cartopy.crs as ccrs\n\n coords = get_lat_lon(data).values()\n\n lat = [coord[0] for coord in coords]\n lon = [coord[1] for coord in coords]\n\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.stock_img()\n ax.coastlines()\n ax.scatter(lon, lat, marker='o', s=50, alpha=0.8)\n ax.set_extent([-75, -20, -35, 5], crs=ccrs.PlateCarree())\n ax.set_title(title)\n plt.savefig(output)",
"def ListColorMaps(self):\n p.rc('text', usetex=False)\n a=p.outerproduct(numpy.arange(0,1,0.01),numpy.ones(10))\n p.figure(figsize=(10,5))\n p.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)\n maps=[m for m in p.cm.datad.keys() if not m.endswith(\"_r\")]\n maps.sort()\n l=len(maps)+1\n i=1\n for m in maps:\n p.subplot(1,l,i)\n p.axis(\"off\")\n p.imshow(a,aspect='auto',cmap=p.get_cmap(m),origin=\"lower\")\n p.title(m,rotation=90,fontsize=10)\n i=i+1\n #savefig(\"colormaps.png\",dpi=100,facecolor='gray')\n p.show()",
"def visualize(self, features, targets, my_title=\"untitled\"):\n try:\n import matplotlib.pyplot as plt\n except:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n\n plt.figure(figsize=(6,4))\n #plt.contourf(self.out, cmap=plt.cm.Paired)\n plt.axis('off')\n plt.scatter(features[:, 0], features[:, 1], c=self.out)\n plt.title(my_title)\n plt.savefig(f'{my_title}.png')",
"def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()",
"def generate_movie(filename, x_size=640, y_size=360, numframes=150, dpi=100):\n global timeflag\n timeflag = 1\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n print \"red_function:\\t\" + str(red_function)\n print \"green_function:\\t\" + str(green_function)\n print \"blue_function:\\t\" + str(blue_function)\n\n for n in range(1, numframes+1):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n t = remap_interval(n, 0, numframes, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, t)),\n color_map(evaluate_random_function(green_function, x, y, t)),\n color_map(evaluate_random_function(blue_function, x, y, t))\n )\n im.save(\"movie_images/\"+'%03d'%n+\".png\")\n\n os.system(\"echo 'yes'|avconv -r 24 -i movie_images/%03d.png -vb 20M myart.mp4\")\n\n \"\"\"fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n im = Image.new(\"RGB\", (x_size, y_size))\n\n def update_img(n):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, n)),\n color_map(evaluate_random_function(green_function, x, y, n)),\n color_map(evaluate_random_function(blue_function, x, y, n))\n )\n im.save(\"test.png\")\n return im\n ani = animation.FuncAnimation(fig, update_img, numframes, interval=24) #TODO: FIX THIS\n writer = animation.writers['avconv'](fps=24)\n\n ani.save(filename, writer=writer, dpi=dpi)\"\"\"",
"def createMap(title_in, file_in, fig_file_in, N, vmin, vmax, lon_in,\n lat_in, sss_in, colors, label='SSS [PSS]'):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title_in)\n plt.figtext(1, 0, file_in, ha='right', va='bottom', fontsize=6)\n\n map = Basemap(projection='moll', resolution='l', lon_0=-50, ellps='WGS84', anchor='S')\n map.drawcoastlines(linewidth=0.01, antialiased=False)\n map.drawmapboundary(fill_color='white', linewidth=0.01)\n map.drawmeridians(np.arange(-180,181,60), labels=[0,0,0,0], linewidth=0.01, labelstyle=None)\n map.drawparallels(np.arange(-90,91,30), labels=[1,0,0,0], linewidth=0.01, labelstyle=None) \n map.fillcontinents(color='grey')\n\n ticks = np.linspace(vmin, vmax, N+1)\n \n lonout, z = map.shiftdata(lon_in, sss_in, lon_0=-50)\n lon, lat = np.meshgrid(lonout, lat_in)\n x, y = map(lon, lat)\n\n cmap = cm.get_cmap(colors, N)\n cmap.set_bad('1.0')\n cmap.set_under((0.0, 0.0, 0.25, 1.0))\n cmap.set_over((0.25, 0.0, 0.0, 1.0))\n\n pc = map.pcolormesh(x, y, z, vmin=vmin, vmax=vmax, cmap=cmap)\n cb = plt.colorbar(pc, shrink=0.8, orientation='horizontal', fraction=0.04, extend ='both', ticks=ticks)\n cb.set_label(label)\n plt.savefig(fig_file_in)\n logging.debug(fig_file_in +' .... created!' )\n plt.close()\n\n return None",
"def velocity_map(self, output='test'):\n self.figure = figure(figsize=(10,3))\n self.axes = self.figure.gca() \n xWindowLim = (self.analyst.windowSize[0], self.analyst.windowSize[1])\n yWindowLim = (self.analyst.windowSize[2], self.analyst.windowSize[3])\n \n # Generate contours for velocity magnitude \n xGrid = linspace(\\\n xWindowLim[0]*self.millimetersPerPixel, \n xWindowLim[1]*self.millimetersPerPixel, self.nbins)\n yGrid = linspace(\\\n yWindowLim[0]*self.millimetersPerPixel, \n yWindowLim[1]*self.millimetersPerPixel, self.nbins)\n magVelGrid = griddata(self.xs, self.ys, self.magVel, xGrid, yGrid) \n # csf = self.axes.contourf(xGrid, yGrid, magVelGrid, range(2,26,2), cmap=myColorMap)\n csf = self.axes.contourf(xGrid, yGrid, magVelGrid, cmap=myColorMap)\n cbar = self.figure.colorbar(csf) \n cbar.set_label(\"Velocity magnitude, px/s\")\n \n # Generate arrow plot\n # q = self.axes.quiver(self.xs, self.ys, self.us, self.vs,\n # angles = 'xy', scale_units='xy', scale=2, pivot = 'mid')\n # self.axes.quiverkey(q, 0.9, 1.0, 10, \"10 px/frame\", coordinates='axes') \n \n # Save figure \n self.axes.set_aspect('equal')\n self.axes.set_xlim(*xWindowLim)\n self.axes.set_ylim(*yWindowLim)\n self.figure.savefig(output + '_velocity_map.pdf')",
"def plot_map(\n self,\n variable,\n title=None,\n ax=None,\n figsize=None,\n **kwargs,\n ):\n turbines = self.results[FC.TURBINE].to_numpy()\n states = self.results[FC.STATE].to_numpy()\n\n if ax is None:\n __, ax = plt.subplots(figsize=figsize)\n fig = ax.get_figure()\n\n ds = states[-1] - states[-2]\n states = np.append(states, states[-1] + ds)\n turbines = np.arange(len(turbines) + 1)\n\n y, x = np.meshgrid(states, turbines)\n z = self.results[variable].to_numpy()\n\n prgs = {\"shading\": \"flat\"}\n prgs.update(kwargs)\n\n c = ax.pcolormesh(x, y, z.T, **prgs)\n\n ax.set_xticks(turbines[:-1] + 0.5)\n ax.set_xticklabels(turbines[:-1])\n yt = ax.get_yticks()\n ytl = ax.get_yticklabels()\n ax.set_yticks(yt[:-1] + 0.5 * (yt[-1] - yt[-2]), ytl[:-1])\n if len(turbines) > 10:\n xt = ax.get_xticks()\n xtl = [None for t in xt]\n xtl[::5] = ax.get_xticklabels()[::5]\n ax.set_xticks(xt, xtl)\n fig.colorbar(c, ax=ax)\n\n t = title if title is not None else variable\n ax.set_title(t)\n ax.set_xlabel(\"Turbine index\")\n ax.set_ylabel(\"State\")\n\n return ax",
"def plot_interaction_map(model, name, matrix, output_name, first_variable, second_variable, x_coord, y_coord, output_path): \n import matplotlib\n import matplotlib.cm as cm\n import matplotlib.pyplot as plt\n\n font = {'size' : 14}\n\n matplotlib.rc('font', **font)\n fig = plt.figure(figsize=(5,5))\n ax = plt.subplot()\n\n maxValue = np.max(np.abs(matrix))\n img = ax.imshow((matrix), cmap = cm.bwr, origin='lower', vmin = -min(maxValue, 6), vmax = min(maxValue, 6), interpolation='spline16')\n\n first_variable = '{}'.format(first_variable)\n second_variable = '{}'.format(second_variable)\n ax.set_ylabel(r'$x_i$ = ' + first_variable)\n ax.set_xlabel(r'$y_i$ = ' + second_variable)\n ax.axes.set_xticks([0, 50, 99])\n ax.axes.set_yticks([0, 50, 99])\n xticks = np.linspace(np.array(model.feature_limits[first_variable]).min(), np.array(model.feature_limits[first_variable]).max(), 3)\n yticks = np.linspace(np.array(model.feature_limits[second_variable]).min(), np.array(model.feature_limits[second_variable]).max(), 3)\n ax.scatter([x_coord], [y_coord], marker='o', color='white', s = 250, edgecolors='black', linewidth=3)\n\n ax.set_yticklabels([xticks[tind] for tind in range(3)])\n ax.set_xticklabels([yticks[tind] for tind in range(3)])\n ax.axis([0, (100) - 1, 0, (100) - 1])\n\n # ax.scatter([x_coord_linear], [y_coord_linear], marker='o', color='blue', s = 250, edgecolors='black', linewidth=3)\n t = ax.set_title(r'$\\mathregular{\\frac{\\delta ^2 F(\\bar{x})}{\\delta x_i \\delta x_j}}$')\n # t = ax.set_title('{} and {} - '.format(first_variable, second_variable) + r'$\\mathregular{\\frac{\\delta ^2 F(\\bar{x})}{\\delta x_i \\delta x_j}}$')\n t.set_position([.5, 1.025])\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(img, cax=cax)\n cb.set_label(\"Nomralized mixed derivative\", rotation=90)\n plt.savefig('{}/{}_{}_{}_{}_nonlinear_map.pdf'.format(output_path, name, output_name, first_variable, second_variable), transparent=True, bbox_inches='tight', format='pdf', dpi=600)\n # plt.close('all')",
"def generateStationPlot(dir_path, traj_list, color_scheme='light'):\n\n\n # Choose the color scheme\n cs = MapColorScheme()\n \n if color_scheme == 'light':\n cs.light()\n\n else:\n cs.dark()\n\n\n plt.figure(figsize=(19.2, 10.8))\n\n # Init the map\n m = Basemap(projection='cyl', resolution='i')\n\n # Draw the coast boundary and fill the oceans with the given color\n m.drawmapboundary(fill_color=cs.map_background)\n\n # Fill continents, set lake color same as ocean color\n m.fillcontinents(color=cs.continents, lake_color=cs.lakes, zorder=1)\n\n # Draw country borders\n m.drawcountries(color=cs.countries)\n m.drawstates(color=cs.states, linestyle='--')\n\n\n\n ### PLOT WORLD MAP ###\n\n # Group stations into countries\n country_dict = {}\n for traj in traj_list:\n\n for obs in traj.observations:\n\n # Extract country code\n country_code = obs.station_id[:2]\n\n if country_code not in country_dict:\n country_dict[country_code] = {}\n \n\n if obs.station_id not in country_dict[country_code]:\n country_dict[country_code][obs.station_id] = [obs.lat, obs.lon]\n\n\n\n # Plot stations in all countries\n for country_code in country_dict:\n\n station_dict = country_dict[country_code]\n\n # Extract lat/lon\n lat = np.degrees([station_dict[station_id][0] for station_id in station_dict])\n lon = np.degrees([station_dict[station_id][1] for station_id in station_dict])\n\n # Convert lat/lon to x/y\n x, y = m(lon, lat)\n\n plt.scatter(x, y, s=0.75, zorder=5, label=\"{:s}: {:d}\".format(country_code, len(lat)))\n\n\n plt.legend(loc='lower left')\n\n plt.tight_layout()\n\n plt.savefig(os.path.join(dir_path, \"world_map.png\"), dpi=100)\n\n plt.close()\n\n ### ###",
"def plot_screen(tiles, gazepoints, tolerance=0, filepath=None):\n\n #read gaze points\n gazeX = gazepoints['pixels'].map(lambda x: x.x)\n gazeY = gazepoints['pixels'].map(lambda x: x.y)\n gazeI = gazepoints.index\n\n\n #create figure\n fig = plt.figure(figsize=(16,9))\n ax = fig.add_subplot(111)\n\n ax.set_xlim(-200,2120)\n ax.set_ylim(-200,1280)\n ax.set_ylim(ax.get_ylim()[::-1])\n ax.add_patch(plt.Rectangle((0,0),1920,1080,alpha = 0.05,color = 'b'))\n gax = ax.scatter(gazeX, gazeY, s= 10, c = gazeI, cmap = plt.cm.Reds, label = 'gaze')\n fig.colorbar(gax, format = '%d')\n\n #draw the objects\n tolerances = []\n for tile in tiles:\n if tile.is_selected:\n ax.text(tile.x, tile.y, tile.shape, size = 'xx-large', weight = 'bold',\n color = tile.color, bbox=dict(facecolor='red', alpha=0.5))\n else:\n ax.text(tile.x, tile.y, tile.shape, size = 'xx-large', weight = 'bold', color = tile.color)\n #draw the tolerance circles\n circle = plt.Circle((tile.x, tile.y), tolerance, color='r', fill=False)\n ax.add_patch(circle)\n if filepath is not None:\n fig.savefig(filepath, format=filepath.split('.')[1])",
"def plot(self, title='', file_name='schelling.png'):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n #If you want to run the simulation with more than 7 colors, you should set agent_colors accordingly\n colors = ['b','r','g','c','m','y','k']\n for person in self.people:\n ax.scatter(\n person.home.x+0.5,\n person.home.y+0.5,\n s = 50.,\n color=colors[person.group]\n )\n ax.set_title(title, fontsize=10, fontweight='bold')\n ax.set_xlim([0, self.nx])\n ax.set_ylim([0, self.ny])\n ax.set_xticks([])\n ax.set_yticks([])\n plt.savefig(file_name)",
"def map_plot(self, iter_no):\n \n m = self._m\n n = self._n\n plt.figure()\n label=np.zeros(m*n)\n self._trained = True\n mapped = self.map_vects(datanorm)\n mapped=tuple(map(tuple, mapped))\n c=Counter(mapped)\n \n c= sorted(c.items(), key=itemgetter(1))\n a=[m*n]\n for i in range(0,len(c)):\n x=(((c[i])[0])[0])\n y=(((c[i])[0])[1])\n z=((c[i])[1])\n plt.plot(x, y, 'ro', markersize= z/(2*m*n)) \n plt.savefig('exoplanet{}.png'.format(iter_no))\n p=plt.imread('exoplanet{}.png'.format(iter_no))\n imgs.append(p)\n plt.show()\n plt.close()\n print(c)\n self._trained = False",
"def plot_raft_map(data, img, TITLE, OUTDIR, vmin=None, vmax=None):\n\n map = np.zeros((6, 24))\n for i, fli in enumerate(img):\n x = (fli.dev_index / 3) * 2 # [0, 2, 4]\n y = (fli.dev_index % 3) * 8 # [0, 8, 16]\n for j in range(16):\n xx = x + j / 8 # [0, 1,..., 5]\n yy = y + j % 8 # [0, 1,..., 23]\n map[xx, yy] = data[i, j]\n\n yseg = range(6)\n ylab = [\"00-07\", \"10-17\", \"00-07\", \"10-17\", \"00-07\", \"10-17\"]\n xseg = range(0, 24, 4)\n xlab = [\"0\", \"4\", \"0\", \"4\", \"0\", \"4\"]\n\n fig = plt.figure(figsize=(10, 10))\n ax1 = fig.add_subplot(111)\n im = ax1.imshow(map, interpolation='nearest', cmap='jet', aspect=4, vmin=vmin, vmax=vmax)\n plt.yticks(yseg, ylab)\n plt.xticks(xseg, xlab)\n plt.annotate('S22', xy=(0, 0), xytext=(4, -0.8), fontsize=15, ha='center', va='center')\n plt.annotate('S12', xy=(0, 0), xytext=(12, -0.8), fontsize=15, ha='center', va='center')\n plt.annotate('S02', xy=(0, 0), xytext=(20, -0.8), fontsize=15, ha='center', va='center')\n plt.annotate('S02', xy=(0, 0), xytext=(24., 0.5), fontsize=15, ha='left', va='center')\n plt.annotate('S01', xy=(0, 0), xytext=(24., 2.5), fontsize=15, ha='left', va='center')\n plt.annotate('S00', xy=(0, 0), xytext=(24., 4.5), fontsize=15, ha='left', va='center')\n ax1.vlines(7.5, -0.5, 5.5)\n ax1.vlines(15.5, -0.5, 5.5)\n ax1.hlines(1.5, -0.5, 23.5)\n ax1.hlines(3.5, -0.5, 23.5)\n plt.subplots_adjust(left=0.07, bottom=0.05, right=0.8, top=0.95, wspace=0, hspace=0)\n #cbar_ax = fig.add_axes([0.15, 0.03, 0.7, 0.05])\n #fig.colorbar(im, cax=cbar_ax, orientation=\"horizontal\")\n cbar_ax = fig.add_axes([0.87, 0.15, 0.05, 0.7])\n fig.colorbar(im, cax=cbar_ax)\n fig.suptitle(TITLE, y=0.98, size=19)\n plt.savefig(OUTDIR + TITLE + '.png')\n plt.show()\n plt.close(fig)",
"def plot_single_frame(casepath, frameno, max_level, subtitle, outputfile):\n from clawpack import pyclaw\n\n # paths\n casepath = os.path.abspath(casepath)\n outputpath = os.path.join(casepath, \"_output\")\n\n # check file\n if os.path.isfile(outputfile):\n logger.warning(\"Fig %s already exists. Skip\", outputfile)\n logger.handlers[0].flush()\n return\n\n # a new figure\n fig = pyplot.figure(num=0, figsize=(8, 5), dpi=100)\n\n # create an axes at 1, 3, 1\n main_ax = fig.add_axes([0.1, 0.38, 0.8, 0.52])\n\n # solution\n soln = pyclaw.Solution()\n soln.read(frameno, outputpath, file_format=\"binary\", read_aux=True)\n\n # plot topo first\n for lvl in range(1, max_level+1):\n for state in soln.states:\n if state.patch.level != lvl:\n continue\n main_ax.imshow(\n state.aux[0, :, :].T, origin=\"lower\",\n extent=[state.patch.lower_global[0], state.patch.upper_global[0],\n state.patch.lower_global[1], state.patch.upper_global[1]],\n vmin=9, vmax=30, cmap=pyplot.get_cmap(\"terrain\"))\n\n for state in soln.states:\n if state.patch.level != max_level:\n continue\n main_ax.imshow(\n numpy.ma.masked_less(state.q[0, :, :].T, 1e-4),\n origin=\"lower\",\n extent=[state.patch.lower_global[0], state.patch.upper_global[0],\n state.patch.lower_global[1], state.patch.upper_global[1]],\n vmin=0, vmax=0.2, cmap=pyplot.get_cmap(\"viridis\"))\n\n main_ax.set_xlim(0, 152)\n main_ax.set_ylim(0, 60)\n main_ax.set_xlabel(r\"$x\\ (m)$\")\n main_ax.set_ylabel(r\"$y\\ (m)$\")\n\n # plot colorbar in a new axes for topography\n cbar_ax1 = fig.add_axes([0.16, 0.24, 0.68, 0.025])\n cbar1 = matplotlib.colorbar.ColorbarBase(\n cbar_ax1, cmap=pyplot.get_cmap(\"terrain\"), orientation=\"horization\",\n norm=matplotlib.colors.Normalize(vmin=9, vmax=30),\n ticklocation=\"bottom\")\n cbar1.set_label(\"Elevation (m)\")\n\n # plot colorbar in a new axes for depth\n cbar_ax2 = fig.add_axes([0.16, 0.1, 0.68, 0.025])\n cbar2 = matplotlib.colorbar.ColorbarBase(\n cbar_ax2, cmap=pyplot.get_cmap(\"viridis\"), orientation=\"horization\",\n norm=matplotlib.colors.Normalize(vmin=0, vmax=0.2),\n ticklocation=\"bottom\")\n cbar2.set_label(\"Depth (m)\")\n\n fig.suptitle(\"Topography and depth, T={}s\".format(int(soln.state.t+0.5)) +\n \"\\n({})\".format(subtitle),\n x=0.5, y=0.92, fontsize=12,\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\")\n\n fig.savefig(outputfile, dpi=\"figure\", bbox_inches=\"tight\")\n pyplot.close(fig)\n\n logger.info(\"Done creating fig %s\", outputfile)\n logger.handlers[0].flush()\n print(\"Done creating fig {}\".format(outputfile))",
"def _plot_camera_view(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-0.003, 0.003, -0.003, 0.003])\n axs.grid()\n axs.plot([0], [0], 'r+')\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 250):\n axs.plot(\n self._feat_vec[t_step, 0, 0],\n self._feat_vec[t_step, 1, 0], 'ro')\n axs.plot(\n self._feat_vec[t_step, 0, 1],\n self._feat_vec[t_step, 1, 1], 'bo')\n axs.plot(\n self._feat_vec[t_step, 0, 2],\n self._feat_vec[t_step, 1, 2], 'yo')\n axs.plot(\n self._feat_vec[t_step, 0, 3],\n self._feat_vec[t_step, 1, 3], 'go')\n axs.plot(\n self._feat_vec[t_step, 0, 4],\n self._feat_vec[t_step, 1, 4], 'ro')\n plt.pause(1 / self._plot_fps)",
"def plot_map(self,map_options=None) :\n\n if map_options is not None :\n self.map_options.update(map_options)\n\n # TODO: Add custom sizing and resolution specifications\n fig = plt.figure(figsize=(self.map_options['img_size'][0]/2.54,\n self.map_options['img_size'][1]/2.54))\n\n # TODO: Accept custom projections\n proj = ccrs.Mercator()\n\n # TODO: Add support for multiple plots per figure (too complex? consider use cases)\n ax = fig.add_subplot(1,1,1,projection = proj)\n\n # TODO: Increase flexibility of borders consideration\n if self.map_options['brdr_nation'] :\n ax.add_feature(cfeat.BORDERS)\n\n # TODO: Consider first-last versus min-max - how can we avoid accidentally flipping images\n extents=[self.lon[0],self.lon[-1],self.lat[0],self.lat[-1]]\n ax.set_extent(extents)\n\n # Confusingly, this code correctly translate the lat/lon limits into the projected coordinates\n extents_proj = proj.transform_points(ccrs.Geodetic(),np.array(extents[:2]),np.array(extents[2:]))\n extents_proj = extents_proj[:,:2].flatten(order='F')\n\n # TODO: Custom colormaps, interpolation, cropping\n im = ax.imshow(self.map,extent=extents_proj,transform=proj,origin='lower',\n cmap=self.map_options['cmap'],interpolation='bicubic')\n\n # TODO: Add more advanced title interpretation (i.e. smart date placeholder)\n if self.map_options['title'] is not None :\n ax.set_title(self.map_options['title'])\n\n # TODO: Add support for horizontal\n if self.map_options['cbar'] :\n cb = plt.colorbar(im, ax=ax, orientation='horizontal',pad=0.05,fraction=0.05)\n cb.ax.set_xlabel(self.units)\n\n # TODO: Add plot title, small textbox description, copyright from dataset, ticks and gridlines\n if self.map_options['save'] :\n # Generate timestamp filename if relying on default\n if self.map_options['img_filename'] == \"timestamp\" :\n img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')\n\n plt.savefig(self.map_options['img_dir']+img_filename+\".\"+self.map_options['img_filetype'],\n bbox_inches=\"tight\",dpi=self.map_options['img_dpi'])\n\n plt.show()\n\n return self",
"def colored_plot_pairs(frame, pairs, colorMap): \n for i in range(len(pairs)):\n x_label = pairs[i][0]\n y_label = pairs[i][1]\n \n for pattern in frame[colorMap].unique():\n mask = (frame[colorMap] == pattern)\n points = frame[mask] \n x = points[x_label]\n y = points[y_label]\n plt.title(x_label+\"-\"+y_label)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.plot(x, y, 'o', label=pattern)\n \n plt.grid(True)\n plt.legend()\n plt.show()",
"def show():\n setup()\n plt.show()",
"def SimpleArrayPlotHelper(self,filename):\n #levels = np.linspace(-100.0, 9900.0, 100, endpoint=True)\n plt.figure()\n #plt.contourf(orography_field,levels)\n plt.colorbar()\n pts.invert_y_axis()",
"def anim_scatter_plot(points_list, values, \n fname=\"anim_scatter.mpg\", fps=2, *args, **kwargs):\n print \"Genrating temp images\"\n for idx, pts in enumerate(points_list):\n print \"\\tPlot %i of %i\" % (idx, len(points_list))\n scatter_plot(pts, values, \"_tmp_%i.png\" % idx, *args, **kwargs)\n print \"Creating animation\" \n os.system(\"mencoder 'mf://_tmp_*.png' -mf type=png:fps=%i -ovc\\\n lavc -lavcopts vcodec=wmv2 -oac copy -o %s\" % (fps, fname))\n print \"Removing temp files\"\n os.system(\"rm -f _tmp_*.png\")",
"def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot",
"def generate_2D_plot(x, y, labels_dict, file_title, plot_title):\n fig = plt.figure()\n plt.plot(x, y)\n\n if labels_dict:\n plt.xlabel(labels_dict[\"x\"])\n plt.ylabel(labels_dict[\"y\"])\n if plot_title:\n plt.title(plot_title)\n\n plt.savefig(file_title)"
] | [
"0.66461504",
"0.6347427",
"0.6157591",
"0.6120898",
"0.6077686",
"0.60383564",
"0.6011217",
"0.59548956",
"0.59487545",
"0.594416",
"0.58821493",
"0.5876597",
"0.5861647",
"0.58368415",
"0.58342564",
"0.58336323",
"0.5828103",
"0.5826117",
"0.5800824",
"0.57943434",
"0.5785863",
"0.5784283",
"0.5767904",
"0.57654786",
"0.57431585",
"0.5727941",
"0.5711181",
"0.57078886",
"0.56961226",
"0.56798166"
] | 0.72189265 | 0 |
Plots an image and the projections (sums) of it on the x, y axes. | def plot_image_and_proj(image, title="", **kwargs):
    fig = plt.figure()
    gs = gridspec.GridSpec(3, 2, width_ratios=[3, 1], height_ratios=[0.2, 3, 1])
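    # 3x2 grid: thin top row for the colorbar, main image centre-left, x-projection below it, y-projection to the right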
    ax0 = plt.subplot(gs[1,0])
    plt.title(title)
    ims = plt.imshow(image, aspect="auto", **kwargs)
    ax2 = plt.subplot(gs[2,0], sharex=ax0)
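    # projection onto the x axis: sum the image over its rows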
    plt.plot(image.sum(axis=0))
    plt.subplot(gs[1,1], sharey=ax0)
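    # projection onto the y axis: sum over columns, plotted against the row index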
    plt.plot(image.sum(axis=1), range(len(image.sum(axis=1))))
    ax = plt.subplot(gs[0,0])
    plt.colorbar(ims, orientation="horizontal", cax=ax)
    fig.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show_2d_projections(self):\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fig, axs = plt.subplots(1, 3)\n for i in range(3):\n plt.sca(axs[i])\n io.imshow(self.image.max(i), cmap='viridis')",
"def plot(self,id=1,dpi=150):\n fig = plt.figure(id)\n ax1 = fig.add_subplot(111)\n ax1.imshow(self.image,interpolation='nearest',extent=[self.xmin,self.xmax,\n self.ymin,self.ymax], origin='lower')\n #plt.savefig('.png',dpi=dpi)\n plt.draw()",
"def plot_img(X: np.ndarray, **kwargs):\n kwargs.setdefault('origin', 'lower') # Sane default\n plt.imshow(X, **kwargs)",
"def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()",
"def imshow(self):\n axes([0, 0, 1, 1], xticks=[], yticks=[])\n imshow(self.rgb_image())",
"def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")",
"def plotmap(self):\n if self.plotfigure is None: return\n\n self.plotfigure.clf()\n collist = [\"#%.2x%.2x%.2x\" % (i, i, i) for i in self.currentshades]\n cmap = colors.ListedColormap(collist)\n if self.gs.isfixed:\n crange = [self.minvalue] + self.currentvalues\n elif self.gs.isperc:\n crange = np.percentile(self.imagearray, [0.0] + self.currentpercents)\n else:\n crange = np.array([self.minstdd] + self.currentnsigs) * self.stdvalue + self.meanvalue\n norm = colors.BoundaryNorm(crange, cmap.N)\n img = plt.imshow(self.imagearray, cmap=cmap, norm=norm, origin='lower')\n plt.colorbar(img, norm=norm, cmap=cmap, boundaries=crange, ticks=crange)\n if self.imagetitle is not None:\n plt.title(self.imagetitle)",
"def plot(self, show_contours=False):\n plt.imshow(self.img, cmap='gray')\n if show_contours:\n for X in self.contours:\n plt.plot(X[:, 0], X[:, 1])\n plt.gca().invert_yaxis()",
"def _plot_images(self):\n # Plot sagittal (0), coronal (1) or axial (2) view\n self._images = dict(base=list(), cursor_v=list(), cursor_h=list(),\n bounds=list())\n img_min = np.nanmin(self._base_data)\n img_max = np.nanmax(self._base_data)\n text_kwargs = dict(fontsize='medium', weight='bold', color='#66CCEE',\n family='monospace', ha='center', va='center',\n path_effects=[patheffects.withStroke(\n linewidth=4, foreground=\"k\", alpha=0.75)])\n xyz = apply_trans(self._ras_vox_t, self._ras)\n for axis in range(3):\n plot_x_idx, plot_y_idx = self._xy_idx[axis]\n fig = self._figs[axis]\n ax = fig.axes[0]\n img_data = np.take(self._base_data, self._current_slice[axis],\n axis=axis).T\n self._images['base'].append(ax.imshow(\n img_data, cmap='gray', aspect='auto', zorder=1,\n vmin=img_min, vmax=img_max))\n img_extent = self._img_extents[axis] # x0, x1, y0, y1\n w, h = np.diff(np.array(img_extent).reshape(2, 2), axis=1)[:, 0]\n self._images['bounds'].append(Rectangle(\n img_extent[::2], w, h, edgecolor='w', facecolor='none',\n alpha=0.25, lw=0.5, zorder=1.5))\n ax.add_patch(self._images['bounds'][-1])\n v_x = (xyz[plot_x_idx],) * 2\n v_y = img_extent[2:4]\n self._images['cursor_v'].append(ax.plot(\n v_x, v_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0])\n h_y = (xyz[plot_y_idx],) * 2\n h_x = img_extent[0:2]\n self._images['cursor_h'].append(ax.plot(\n h_x, h_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0])\n # label axes\n self._figs[axis].text(0.5, 0.05, _IMG_LABELS[axis][0],\n **text_kwargs)\n self._figs[axis].text(0.05, 0.5, _IMG_LABELS[axis][1],\n **text_kwargs)\n self._figs[axis].axes[0].axis(img_extent)\n self._figs[axis].canvas.mpl_connect(\n 'scroll_event', self._on_scroll)\n self._figs[axis].canvas.mpl_connect(\n 'button_release_event', partial(self._on_click, axis=axis))\n # add head and brain in mm (convert from m)\n if self._head is None:\n logger.info('Using marching cubes on CT for the '\n '3D visualization panel')\n rr, tris = _marching_cubes(np.where(\n self._base_data < np.quantile(self._base_data, 0.95), 0, 1),\n [1])[0]\n rr = apply_trans(self._vox_ras_t, rr)\n self._renderer.mesh(\n *rr.T, triangles=tris, color='gray', opacity=0.2,\n reset_camera=False, render=False)\n else:\n self._renderer.mesh(\n *self._head['rr'].T * 1000, triangles=self._head['tris'],\n color='gray', opacity=0.2, reset_camera=False, render=False)\n if self._lh is not None and self._rh is not None:\n self._renderer.mesh(\n *self._lh['rr'].T * 1000, triangles=self._lh['tris'],\n color='white', opacity=0.2, reset_camera=False, render=False)\n self._renderer.mesh(\n *self._rh['rr'].T * 1000, triangles=self._rh['tris'],\n color='white', opacity=0.2, reset_camera=False, render=False)\n self._renderer.set_camera(azimuth=90, elevation=90, distance=300,\n focalpoint=tuple(self._ras))\n # update plots\n self._draw()\n self._renderer._update()",
"def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image",
"def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()",
"def visualizeImg(img):\n plt.figure(figsize=(10,4))\n plt.imshow(img)\n plt.show()",
"def plot_data(self):\n # plot every log image\n for log_img in self.log_img_map.itervalues():\n log_img.plot()",
"def plot_map(self,map_options=None) :\n\n if map_options is not None :\n self.map_options.update(map_options)\n\n # TODO: Add custom sizing and resolution specifications\n fig = plt.figure(figsize=(self.map_options['img_size'][0]/2.54,\n self.map_options['img_size'][1]/2.54))\n\n # TODO: Accept custom projections\n proj = ccrs.Mercator()\n\n # TODO: Add support for multiple plots per figure (too complex? consider use cases)\n ax = fig.add_subplot(1,1,1,projection = proj)\n\n # TODO: Increase flexibility of borders consideration\n if self.map_options['brdr_nation'] :\n ax.add_feature(cfeat.BORDERS)\n\n # TODO: Consider first-last versus min-max - how can we avoid accidentally flipping images\n extents=[self.lon[0],self.lon[-1],self.lat[0],self.lat[-1]]\n ax.set_extent(extents)\n\n # Confusingly, this code correctly translate the lat/lon limits into the projected coordinates\n extents_proj = proj.transform_points(ccrs.Geodetic(),np.array(extents[:2]),np.array(extents[2:]))\n extents_proj = extents_proj[:,:2].flatten(order='F')\n\n # TODO: Custom colormaps, interpolation, cropping\n im = ax.imshow(self.map,extent=extents_proj,transform=proj,origin='lower',\n cmap=self.map_options['cmap'],interpolation='bicubic')\n\n # TODO: Add more advanced title interpretation (i.e. smart date placeholder)\n if self.map_options['title'] is not None :\n ax.set_title(self.map_options['title'])\n\n # TODO: Add support for horizontal\n if self.map_options['cbar'] :\n cb = plt.colorbar(im, ax=ax, orientation='horizontal',pad=0.05,fraction=0.05)\n cb.ax.set_xlabel(self.units)\n\n # TODO: Add plot title, small textbox description, copyright from dataset, ticks and gridlines\n if self.map_options['save'] :\n # Generate timestamp filename if relying on default\n if self.map_options['img_filename'] == \"timestamp\" :\n img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')\n\n plt.savefig(self.map_options['img_dir']+img_filename+\".\"+self.map_options['img_filetype'],\n bbox_inches=\"tight\",dpi=self.map_options['img_dpi'])\n\n plt.show()\n\n return self",
"def show_env(self, img):\n plt.figure(1)\n plt.subplot(111)\n plt.imshow(img, interpolation=\"nearest\")\n plt.show()",
"def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()",
"def plot(self):\n\t\tself.plotOfXray().plot()",
"def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()",
"def plot_potential(self):\n imshow(self.U, extent=(self.x[0], self.x[-1], self.y[0], self.y[-1]), aspect='auto', interpolation='None')\n xlabel('x')\n ylabel('y')",
"def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################",
"def PlotImages(x):\r\n # 5.1 Create figure-window and axes\r\n _, ax = plt.subplots(nrows = 2, ncols= 3)\r\n # 5.2\r\n ax[0,0].imshow(x[0, :].reshape(75,75))\r\n ax[0,1].imshow(x[1, :].reshape(75,75))\r\n ax[0,2].imshow(x[2, :].reshape(75,75))\r\n ax[1,0].imshow(x[3, :].reshape(75,75))\r\n ax[1,1].imshow(x[4, :].reshape(75,75))\r\n ax[1,2].imshow(x[5, :].reshape(75,75))\r\n plt.show()",
"def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')",
"def plot_tiles(self):\n \n #TODO: adjust plot, border and text_box sizes\n \n ordered_projections = []\n flat_clusters = []\n colors_2D = []\n\n for cluster, nodes in clusters.items():\n for n in nodes:\n ordered_projections.append(projection_2D[n])\n\n for n in nodes:\n flat_clusters.append(n)\n\n for i, n in enumerate(G.nodes):\n if n in nodes:\n colors_2D.append(colors[i])\n\n grid_cols = int(np.ceil(np.sqrt(len(ordered_projections))))\n\n if len(ordered_projections) <= (grid_cols**2 - grid_cols):\n grid_rows = grid_cols - 1\n else:\n grid_rows = grid_cols\n\n #assuming images are same size, get shape\n l, w = ordered_projections[0].shape\n\n #add blank images to pack in grid\n while len(ordered_projections) < grid_rows*grid_cols:\n ordered_projections.append(np.zeros((l, w)))\n colors_2D.append((0., 0., 0.))\n flat_clusters.append('')\n\n f = Figure()\n\n grid = ImageGrid(f, 111, #similar to subplot(111)\n nrows_ncols=(grid_rows, grid_cols), #creates grid of axes\n axes_pad=0.05) #pad between axes in inch\n \n lw = 1.75\n text_box_size = 5 \n props = dict(boxstyle='round', facecolor='white')\n \n for i, (ax, im) in enumerate(zip(grid, ordered_projections)):\n ax.imshow(im, cmap='gray')\n\n for side, spine in ax.spines.items():\n spine.set_color(colors_2D[i])\n spine.set_linewidth(lw)\n\n ax.get_yaxis().set_ticks([])\n ax.get_xaxis().set_ticks([])\n\n text = str(flat_clusters[i])\n ax.text(1, 1, text, va='top', ha='left', bbox=props, size=text_box_size)\n \n newWindow = tk.Toplevel()\n newWindow.grid_rowconfigure(0, weight=1)\n newWindow.grid_columnconfigure(0, weight=1)\n \n #PLOT FRAME\n plotFrame = tk.Frame(newWindow, bg='lightgrey', width=600, height=400)\n plotFrame.grid(row=0, column=0, sticky='nsew')\n \n canvas = FigureCanvasTkAgg(f, plotFrame)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n canvas.figure.tight_layout()\n \n\n #TOOLBAR FRAME\n toolbarFrame = ttk.Frame(newWindow, width=600, height=100)\n toolbarFrame.grid(row=1, column=0, sticky='nsew')\n toolbarFrame.grid_propagate(0)\n \n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)\n toolbar.update()",
"def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()",
"def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()"
] | [
"0.6885412",
"0.6744014",
"0.6729617",
"0.6729617",
"0.6729617",
"0.66539717",
"0.6561349",
"0.6445221",
"0.64222753",
"0.6366756",
"0.62954986",
"0.62783086",
"0.6277011",
"0.6251577",
"0.623828",
"0.62100685",
"0.62035716",
"0.6173411",
"0.6167177",
"0.61653167",
"0.6155882",
"0.6147212",
"0.61449754",
"0.61133015",
"0.6112973",
"0.60967225",
"0.60729325",
"0.60482633",
"0.60278845",
"0.60278845"
] | 0.7413701 | 0 |
Request data for a list of block hashes. | def send_get_data(self, block_hashes):
    msg = msg_getdata()
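    # one MSG_BLOCK inventory entry per requested block hash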
    for x in block_hashes:
        msg.inv.append(CInv(MSG_BLOCK, x))
    self.send_message(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_blocks():\n chain_to_send = blockchain\n blocklist = \"\"\n for i in range(len(chain_to_send)):\n block = chain_to_send[i]\n block_index = str(block.index)\n block_timestamp = str(block.timestamp)\n block_data = str(block.data)\n block_hash = block.hash\n assembled = json.dumps({\n \"index\": block_index,\n \"timestamp\": block_timestamp,\n \"data\": block_data,\n \"hash\": block_hash\n })\n if blocklist == \"\":\n blocklist = assembled\n else:\n blocklist += assembled\n return blocklist\n\n chain_to_send = json.dumps(chain_to_send)\n return chain_to_send",
"def get_block(blockhash):\n return requests.get(BASE+f'/api/block/{blockhash}').json()",
"def get_blockHash(self, data):\n blockHash = data['blockHash']\n return blockHash",
"def get_blocks():\n query = iroha.blocks_query()\n IrohaCrypto.sign_query(query, ADMIN_PRIVATE_KEY)\n for block in net.send_blocks_stream_query(query):\n print('\\nThe next block arrived:', block)",
"def fetch_block_transaction_hashes(self, index, cb):\r\n data = pack_block_index(index)\r\n self.send_command('blockchain.fetch_block_transaction_hashes',\r\n data, cb)",
"def hash_block_content(index: int, prev_bhash: str, timestamp: int,\n data: List[Transaction], difficulty: int, nonce: int):\n return hash_sha256([index, prev_bhash, timestamp, data, difficulty, nonce])",
"def block_info(self, block):\n # Allow for a list of blocks..\n block = utils.request_type(block)\n\n res = r.get(self.url + self.block + str(block))\n return self.execute(res)",
"def ip_get_blocks():\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=IPManager')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # get list of provisioning blocks\n blocklist = []\n for tblk in bs.find_all('table')[3].tr.div.table.find_all('tr'):\n tbx = {\n 'id': re.match(r'.+block_id=([0-9]+).*', tblk.find_all('td')[0].a['href']).group(1),\n 'prefix': tblk.find_all('td')[0].a.string,\n 'block': tblk.find_all('td')[1].string,\n 'usage': tblk.find_all('td')[2].string\n }\n blocklist.append(tbx)\n\n return bs, blocklist",
"def make_blocks_from_blockhashes(blockhashes):\n blocks = []\n\n for (height, blockhash) in enumerate(blockhashes):\n block = {\"hash\": blockhash, \"height\": height, \"tx\": []}\n if height != 0:\n block[\"previousblockhash\"] = previousblockhash\n blocks.append(block)\n previousblockhash = blockhash\n\n return blocks",
"def GetBlockHash(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_list():\n hash_map_list = model.hash_table.find()\n data = dict(success=True, hash_map_list=hash_map_list)\n return data",
"def list_blocks(self, _):\n print(self.data.name)",
"def get_blocks(self):\n cmd = \"\"\" SELECT * FROM %s; \"\"\" %(TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()",
"def get_block_hash(height):\n return requests.get(BASE+f'/api/block-index/{height}').json()['blockHash']",
"def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if bid=='':\n blocks = Block.objects.filter(name__icontains=blockName, district__name__icontains = districtName, district__state__name__icontains=stateName)\n else:\n blocks = Block.objects.filter(id = bid)\n\n blocks = blocks[:limit]\n serializer = SelectBlockSerializer(blocks, many=True)\n return JsonResponse(serializer.data, safe=False)",
"def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if bid=='':\n blocks = Block.objects.filter(name__icontains=blockName, district__name__icontains = districtName, district__state__name__icontains=stateName)\n else:\n blocks = Block.objects.filter(id = bid)\n\n blocks = blocks[:limit]\n serializer = SelectBlockSerializer(blocks, many=True)\n return JsonResponse(serializer.data, safe=False)",
"def get_block_dict(self) -> dict:\n return self.blocks",
"def verify_response_block_list(self, response):\n self.assertSetEqual(\n {block['id'] for block in response.data},\n self.non_orphaned_block_usage_keys,\n )",
"def get_table():\n response = dict(blocks=[])\n block_ids = DB.get_sched_block_instance_ids()\n for index, block_id in enumerate(block_ids):\n block = DB.get_block_details([block_id]).__next__()\n info = [\n index,\n block['id'],\n block['sub_array_id'],\n len(block['processing_blocks'])\n ]\n response['blocks'].append(info)\n return response, HTTPStatus.OK",
"def get_rawblock(blockhash):\n return requests.get(BASE+f'/api/rawblock/{blockhash}').json()['rawblock']",
"def get_blocks_before(self, hash_bytes: bytes, num_blocks: int = 100) -> list[Block]:\n raise NotImplementedError",
"def block_headers(self, block_headers: list):\n num_headers = len(block_headers)\n block_headers_size = num_headers * self._message_size['header']\n return {\n 'id': 'block_headers',\n 'block_headers': block_headers,\n 'size': kB_to_MB(block_headers_size)\n }",
"async def blocklist(self, ctx):\n blocked = await self.db.get('blocked', [])\n if not blocked:\n return await ctx.send('🐱 There are no blocked images.')\n async with aiohttp.ClientSession() as session:\n async with session.post(f'{self.haste_url}/documents', data='\\n'.join(blocked)) as resp:\n return await ctx.send(f'🐱 Here is a list of blocked images\\n\\n{self.haste_url}/{resp[\"key\"]}.txt')",
"def get_blockhashes_at_height(self, height):\n return self.chain[height]",
"def set_hashes(self, url, hashes):",
"async def new_block(request: Request) -> dict:\n block: dict = await request.json()\n block = await chain.add_block(block)\n response_block = Block(**block).to_dict()\n\n miner_ip = f\"{request.client.host}:{request.client.port}\"\n for node in chain.peers:\n async with httpx.AsyncClient() as client:\n _ = await client.get(f\"{node}/\")\n temp_chain = {f\"Block-{height}\": data.to_dict()\n for height, data in enumerate(chain.serialized)}\n return {\"miner_address\": miner_ip,\n \"latest_block\": response_block.dict(),\n \"new_chain\": temp_chain, }",
"def getblock(self, hash):\n return self.proxy.getblock(hash)",
"def method_get_torrents(self, hashes: List[str] = None) -> List[dict]: # pragma: nocover\n raise NotImplementedError",
"def blocklist_update(self):\n self._rpc_version_warning(5)\n result = self._request('blocklist-update')\n if 'blocklist-size' in result:\n return result['blocklist-size']\n return None",
"def stream_blocks():\n # add height\n query = iroha.blocks_query()\n ic.sign_query(query, user_private_key)\n for block in net.send_blocks_stream_query(query):\n pprint(\"The next block arrived: {}\".format(MessageToDict(block)), indent=1)"
] | [
"0.6409231",
"0.6145271",
"0.6124917",
"0.59927475",
"0.5951528",
"0.5827728",
"0.5783262",
"0.57184494",
"0.56973565",
"0.56766754",
"0.5650549",
"0.5647434",
"0.5635516",
"0.56190014",
"0.5569193",
"0.5569193",
"0.55531555",
"0.55520594",
"0.55443203",
"0.5535596",
"0.55184996",
"0.5489227",
"0.54887617",
"0.5467223",
"0.54644203",
"0.5419431",
"0.53589624",
"0.5341245",
"0.53178734",
"0.53059703"
] | 0.70251316 | 0 |
Test whether the last headers announcements received are right. Headers may be announced across more than one message. | def check_last_headers_announcement(self, headers):
    test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
    self.wait_until(test_function)
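    # compare under the p2p lock, then clear the announcement state for the next check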
    with p2p_lock:
        assert_equal(self.recent_headers_announced, headers)
        self.block_announced = False
        self.last_message.pop("headers", None)
        self.recent_headers_announced = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_bad_headers(self):\n\n headers = [self.sender, self.reply_to] + self.recipients\n for header in headers:\n if _has_newline(header):\n return True\n\n if self.subject:\n if _has_newline(self.subject):\n for linenum, line in enumerate(self.subject.split('\\r\\n')):\n if not line:\n return True\n if linenum > 0 and line[0] not in '\\t ':\n return True\n if _has_newline(line):\n return True\n if len(line.strip()) == 0:\n return True\n return False",
"def assert_header(self):\r\n\r\n if self.length > self.owner.settings[SETTINGS_MAX_FRAME_SIZE]:\r\n raise netius.ParserError(\r\n \"Headers are greater than SETTINGS_MAX_FRAME_SIZE\",\r\n stream = self.stream,\r\n error_code = FRAME_SIZE_ERROR\r\n )\r\n if self.last_type in (HEADERS, CONTINUATION) and not\\\r\n self.last_end_headers and not self.last_stream == self.stream:\r\n raise netius.ParserError(\r\n \"Cannot send frame from a different stream in middle of headers\",\r\n error_code = PROTOCOL_ERROR\r\n )",
"def check_headers(self, headers):\n h = headers.values()[0]\n\n if 'DT' in PAR:\n if h.dt != PAR.DT:\n h.dt = PAR.DT\n\n if 'NT' in PAR:\n if h.nt != PAR.NT:\n print 'Warning: h.nt != PAR.NT'\n\n if 'NREC' in PAR:\n if h.nr != PAR.NREC:\n print 'Warning: h.nr != PAR.NREC'\n\n return h",
"def validate_against_header_count(self):\n valid = False\n len_headers = len(\n [header for header in self.headers if \"Unnamed\" not in header]\n )\n len_annot_type = len(\n [\n annot_type\n for annot_type in self.annot_types\n if \"Unnamed\" not in annot_type\n ]\n )\n if not len_headers == len_annot_type:\n msg = (\n f\"Header mismatch: {len_annot_type} TYPE declarations \"\n f\"for {len_headers} column headers\"\n )\n self.store_validation_issue(\"error\", msg, \"format:cap:count\")\n else:\n valid = True\n return valid",
"def validate_header(self, reply):\n # check message is from my agg to me\n check_equal(reply.header.sender, self.aggregator_uuid, self.logger)\n check_equal(reply.header.recipient, self.common_name, self.logger)\n\n # check that the federation id matches\n check_equal(reply.header.federation_id, self.federation_uuid, self.logger)\n\n # check that we agree on single_col_cert_common_name\n check_equal(reply.header.single_col_cert_common_name, self.single_col_cert_common_name, self.logger)",
"def verifyHeader(self, headers):\n for head in headers:\n if(hex(head[0]) == VID and hex(head[1]) == PID):\n return True\n return False",
"def check_non_consecutive(agent):\n return agent.received[-1] - agent.received[-2] != 1",
"def validate_unique_header(self):\n valid = False\n unique_headers = set(self.headers)\n if len(unique_headers) == len(self.headers):\n valid = True\n else:\n seen_headers = set()\n duplicate_headers = set()\n for x in self.headers:\n if x in seen_headers or seen_headers.add(x):\n duplicate_headers.add(x)\n msg = f\"Duplicated header names are not allowed: {duplicate_headers}\"\n log_exception(Annotations.dev_logger, Annotations.user_logger, msg)\n self.store_validation_issue(\"error\", msg, \"format:cap:unique\")\n valid = False\n if any(\"Unnamed\" in s for s in list(unique_headers)):\n msg = \"Headers cannot contain empty values\"\n log_exception(Annotations.dev_logger, Annotations.user_logger, msg)\n self.store_validation_issue(\"error\", msg, \"format:cap:no-empty\")\n valid = False\n return valid",
"def _expect_100(connection: typing.Union[ssl.SSLSocket, socket.socket]) -> bool:\n try:\n headers = b''\n while b'\\r\\n\\r\\n' not in headers:\n headers += connection.recv(1024)\n return b' 100 ' in headers.split(b'\\r\\n')[0]\n except IOError:\n return False",
"def has_headers(self):\n for column in self.columns:\n if column.header:\n return True\n return False",
"def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False",
"def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False",
"def __len__(self):\n return len(self._headers)",
"def check_last_inv_announcement(self, inv):\n\n test_function = lambda: self.block_announced\n self.wait_until(test_function)\n\n with p2p_lock:\n compare_inv = []\n if \"inv\" in self.last_message:\n compare_inv = [x.hash for x in self.last_message[\"inv\"].inv]\n assert_equal(compare_inv, inv)\n self.block_announced = False\n self.last_message.pop(\"inv\", None)",
"def check_header_match_180_or_later(header1, header2):\r\n header1 = header1.split(':')\r\n header2 = header2.split(':')\r\n for e1, e2 in zip(header1, header2):\r\n if e1.split(' ')[0] != e2.split(' ')[0]:\r\n return False\r\n\r\n return True",
"def test_check_header_match_180_or_later(self):\r\n # identical\r\n self.assertTrue(check_header_match_180_or_later(\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0\",\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0\"))\r\n # identical except read number\r\n self.assertTrue(check_header_match_180_or_later(\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0\",\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 2:N:0:0\"))\r\n # identical except read number\r\n self.assertTrue(check_header_match_180_or_later(\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0\",\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 3:N:0:0\"))\r\n # different reads\r\n self.assertFalse(check_header_match_180_or_later(\r\n \"M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0\",\r\n \"M00176:17:000000000-A0CNA:1:1:16427:1774 1:N:0:0\"))",
"def has_header():\n header_content = (\"\\n\".join(CURRENT_BUFFER[:7])).lower()\n return sum(1 for keyword in KEYWORDS if header_content.find(keyword.lower()) != -1) >= 2",
"def test_headers(self):\n msg = self.shortDescription()\n self.assertTrue(False, msg=msg)\n pass",
"def correct_header_fields():\n test_str = \"c0rrect_!!heAd3R fi3ld5__%%!! @\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n\n # Check the first sent segment. Should have all the same header fields as\n # the reference.\n segment = segments[0]\n\n # Check the flags first. Maybe decided to ACK all segments.\n if not segment.has_same_flags(ref_segment):\n if \"ACK\" in segment.flags:\n segment.flags.remove(\"ACK\")\n\n return (\n segment.seqno == ref_segment.seqno and\n (segment.ackno == 0 or segment.ackno == ref_segment.ackno) and\n segment.length == ref_segment.length and\n segment.has_same_flags(ref_segment) and\n segment.window == ref_segment.window and\n (segment.checksum == ref_segment.checksum or\n int(segment.checksum, 16) == segment.c_repr.cksum)\n )",
"def is_responded(self):\n if not self.requires_response:\n return False, None, None\n for history_entry in self.history[::-1]:\n if history_entry.action == MessageAction.respond:\n return True, history_entry.timestamp, history_entry.username\n else:\n return False, None, None",
"def IsDuplicate(self, header, payload_string, cur_time): # pylint: disable=unused-argument\n last_seq = self._recv_seq_nums[(header.source, header.type)]\n last_time = self._recv_times[(header.source, header.type)]\n cur_seq = header.sequence\n\n # Sequence numbers expire after maximum latency.\n if cur_time - last_time < aio_header.AIO_EXPIRATION_TIME_US * 1e-6:\n # Expected duplication.\n if cur_seq == last_seq:\n return True\n # Out of order.\n if (cur_seq - last_seq) % 2**16 > aio_header.AIO_ACCEPTANCE_WINDOW:\n return True\n return False",
"def is_forwarded(self):\n return bool(re.match(FW_PATTERNS, self.header('Subject', '')))",
"def allhunks(self):\n for h in self.header:\n if self.allhunks_re.match(h):\n return True\n return False",
"def ack(self):\n return (self.status == self.STATUS_ACK)",
"async def is_server_ready(self, headers: dict[str, t.Any] = ...) -> bool:",
"def announcement_complete(self) -> bool:\n # If a quorum not announced, not ready\n if len(self._available_guardians) < self._context.quorum:\n log_warning(\"cannot decrypt with fewer than quorum available guardians\")\n return False\n\n # If guardians missing or available not accounted for, not ready\n if (\n len(self._available_guardians) + len(self._missing_guardians)\n != self._context.number_of_guardians\n ):\n log_warning(\n \"cannot decrypt without accounting for all guardians missing or present\"\n )\n return False\n return True",
"def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]",
"def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)",
"def process_headers(fin, fout, fixed_headers):\n filtered_headers = set(item[0] for item in fixed_headers)\n filtered_headers.add(\"SAMPLE\")\n expected_values = {\n name: value for name, asserted, value in fixed_headers if asserted\n }\n errors = False\n for raw_line in fin:\n if raw_line.startswith('##'):\n # TODO: This will break if the metadata header is bad.\n name, value = raw_line[2:].rstrip().split('=', 1)\n if name in filtered_headers:\n if name in expected_values:\n if value != expected_values[name]:\n errors = True\n # TODO: propper logging\n sys.stderr.write(\n 'tcga-vcf-reheader: mismatch {}={}\\n'.format(\n name, value\n )\n )\n else: # Just some other header...\n fout.write(raw_line)\n else:\n break\n fout.write(raw_line) # raw_line should now be the data header line.\n return errors",
"def getLastReqArchiveUse(self):\n return self.getLastHeader(\"req-archive\", \"0\") == \"1\""
] | [
"0.65723264",
"0.61208695",
"0.6049333",
"0.6003903",
"0.59692436",
"0.5896339",
"0.5871474",
"0.5851515",
"0.57597494",
"0.57581687",
"0.57434636",
"0.57434636",
"0.573652",
"0.57329977",
"0.5717457",
"0.5645279",
"0.55954987",
"0.55861056",
"0.55829144",
"0.555257",
"0.5521725",
"0.5519392",
"0.55052966",
"0.54921067",
"0.5483426",
"0.5477582",
"0.5472228",
"0.5462045",
"0.5429188",
"0.5413564"
] | 0.8560445 | 0 |
Test whether the last announcement received had the right inv. inv should be a list of block hashes. | def check_last_inv_announcement(self, inv):
test_function = lambda: self.block_announced
self.wait_until(test_function)
with p2p_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def InventoryReceived(self, inventory):\n if inventory.Hash.ToBytes() in self._MissedBlocks:\n self._MissedBlocks.remove(inventory.Hash.ToBytes())\n\n if inventory is MinerTransaction:\n return False\n\n if type(inventory) is Block:\n if BC.Default() is None:\n return False\n\n if BC.Default().ContainsBlock(inventory.Index):\n return False\n\n if not BC.Default().AddBlock(inventory):\n return False\n\n else:\n if not inventory.Verify(self.MemPool.values()):\n return False",
"def has_invites(self):\r\n return self.invite_ct > 0",
"def test_invited(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )\n self.assertEqual(\n r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], +1\n )",
"def is_invited_pending_activation(self):\n if self.registration_method == self.INVITED \\\n and self.is_pending_activation():\n return True\n else:\n return False",
"def is_invincible(self):\n\t\treturn self._invincible",
"def test_new_invitation(self):\n (approval_user_id,\n joining_user_id,\n conversation_id,\n _) = self.setup_invites()\n uri = '/status/{}/{}'.format(conversation_id, approval_user_id)\n self.set_user_cookie(approval_user_id, conversation_id)\n self.set_session_cookie(approval_user_id, conversation_id)\n resp = self.client.post(\n uri, data={'public_key':'', 'last_message_seen_id': None})\n resp_json = json.loads(resp.data)\n\n invitations = resp_json['invitations']\n self.assertEqual(len(invitations), 1)\n self.assertEqual(invitations[0]['user_id'], joining_user_id)",
"def contains(self, block_):\n return block_ in self._inventory",
"def check_non_consecutive(agent):\n return agent.received[-1] - agent.received[-2] != 1",
"def print_inv_msg(b, invNum=0):\n # Print if the target block is in the range\n if invNum + 500 >= TARGET_BLOCK:\n prefix = ' '\n print(prefix + 'INV')\n print(prefix + '-' * 56)\n print(b[:3].hex(), ' (each hash printed in reverse of '\n 'serialized order for clarity) count 500')\n count = 1\n target = [\"\", -1]\n last_block = ''\n n = 36\n\n # Iterate through the inventory messages\n for i in range(3, len(b), n):\n try:\n block = b[i:i + n].hex()\n objType = block[:8] # Object type\n hashObj = convertLittleBig(block[8:]) # Hash of the object\n\n # Keep the information to return if the target block is found\n if invNum + count == TARGET_BLOCK:\n target = [hashObj, invNum + count]\n\n # Print if the target block is in the range\n if invNum + 500 >= TARGET_BLOCK:\n print(objType, hashObj, 'MSG_BLOCK', 'inventory #'\n + str(invNum + count))\n count += 1\n last_block = hashObj\n except Exception:\n continue\n # Return the found one if the target block is found\n if target[1] == TARGET_BLOCK:\n return target\n # Return the last block if the target block is not found\n return [last_block, invNum + count - 1]",
"def test_ack_invitation(self):\n (approver_user_id,\n joining_user_id,\n _,\n invite_id) = self.setup_invites()\n uri = '/invite_ack/{}/{}'.format(approver_user_id, joining_user_id)\n rsp = self.client.post(uri, data={'approves': True})\n rsp_json = json.loads(rsp.data)\n\n invite = model.Invitation.query.get(invite_id)\n self.assertEqual(rsp_json['success'], True)\n self.assertEqual(rsp.status_code, 200)\n self.assertEqual(invite.invite_id, invite_id)",
"def test_invites_added(self):\n rsp = self.client.post('/join/join-here', data={'name': 'bob'})\n rsp_json = json.loads(rsp.data)\n\n invites = model.Invitation.query.filter_by(\n joining_user_id=rsp_json['new_user_id']).count()\n self.assertNotEqual(int(invites), 0)",
"def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1",
"def is_invulnerable(self) -> bool:\n return self.invul_timer != 0",
"def check_last_headers_announcement(self, headers):\n test_function = lambda: (len(self.recent_headers_announced) >= len(headers))\n self.wait_until(test_function)\n with p2p_lock:\n assert_equal(self.recent_headers_announced, headers)\n self.block_announced = False\n self.last_message.pop(\"headers\", None)\n self.recent_headers_announced = []",
"def test_archived_no_iterate_last_check(self):\n old_last_check = self.alice_inbox.last_checked\n # Assigment\n messages = self.alice_inbox.archived\n self.assertEqual(self.alice_inbox.last_checked, old_last_check)",
"def list_invitation(self, master_id):\n try:\n response = self.client.list_invitations()\n for invitation in response['Invitations']:\n print(invitation['RelationshipStatus'])\n if invitation['AccountId'] == master_id and invitation['RelationshipStatus'] == 'Invited':\n return invitation['InvitationId']\n return False\n except ClientError as e:\n print(e.response['Error']['Code'])\n return False",
"def stepCheckConfirmedCheckbookInventory(self, sequence=None, sequence_list=None, **kw):\n # check the inventory of the bank account\n self.assertEqual(self.simulation_tool.getCurrentInventory(\n payment=self.bank_account_2.getRelativeUrl(),\n resource=self.currency_1.getRelativeUrl()), 100000)\n self.assertEqual(self.simulation_tool.getAvailableInventory(\n payment=self.bank_account_2.getRelativeUrl(),\n resource=self.currency_1.getRelativeUrl()), 80000)\n self.assertEqual(self.simulation_tool.getFutureInventory(\n payment=self.bank_account_2.getRelativeUrl(),\n resource=self.currency_1.getRelativeUrl()), 80000)",
"def check_contract_expire_soon():\n\n contract_expire_soon_list = []\n contract_expired_list = []\n\n # get user contract\n # refactoring techniques: replace temp with query\n user_role = get_user_role()\n contract_list = user_role.user_contracts\n\n for contract in contract_list:\n if contract['dateSigned'] and not contract['terminationDate']:\n\n # get expiry date and current date\n expiry_date = datetime.strptime(contract['expiryDate'][:19], \"%Y-%m-%dT%H:%M:%S\")\n current_time = datetime.now()\n \n # get the diffenrence between expiry date and current date\n difference = expiry_date - current_time\n days = divmod(difference.days, 86400)\n\n # Refactoring techniques: composing method\n contract_expire_soon = (days[1] <= 31) and (days[1] >= 0)\n contract_expired = days[0] < 0\n\n if contract_expire_soon:\n contract_expire_soon_list.append(contract)\n if contract_expired:\n contract_expired_list.append(contract)\n \n # return True if there's elem in any list, else False\n if len(contract_expire_soon_list) >= 1 or len(contract_expired_list) >= 1:\n return True, contract_expire_soon_list, contract_expired_list\n else:\n return False, contract_expire_soon_list, contract_expired_list",
"def verify_player_pending(self, player_email):\n try:\n self.pending_players.index(player_email)\n return True\n except ValueError:\n return False",
"async def _invites(self, ctx):\n waiting = await ctx.send(\"`Loading server invites...`\")\n guild = ctx.guild\n guild_invites = await guild.invites()\n invitecodes = []\n uses = []\n channel = []\n inviter = []\n for invite in guild_invites:\n invitecodes.append(invite.code)\n uses.append(str(invite.uses))\n channel.append(invite.channel.mention)\n inviter.append(invite.inviter.mention)\n\n invitecodes = \"\\n\".join(invitecodes)\n uses = \"\\n\".join(uses)\n channel = \"\\n\".join(channel)\n inviter = \"\\n\".join(inviter)\n\n e = discord.Embed(color=ctx.guild.me.top_role.color)\n e.set_author(name=f\"{guild.name}'s invites\")\n e.set_thumbnail(url=guild.icon_url)\n e.add_field(name=\"Invites\", value=invitecodes)\n e.add_field(name=\"Uses\", value=uses)\n e.add_field(name=\"Channel\", value=channel)\n e.add_field(name=\"Inviter\", value=inviter)\n await waiting.edit(content=None, embed=e)",
"def verify_tbird_updatefirmwarealerts_li(cls, message_list):\n found = 0\n num_message = len(message_list)\n if not CommonOperationLogicalInterconnect.click_logical_interconnect_activity():\n logger.warn(\"failed to select activity..please check\")\n return False\n\n for message in message_list:\n if ui_lib.wait_for_element(GeneralLogicalInterconnectsElements.ID_ACTIVITY_MESSAGE % message, 15):\n time = (GeneralLogicalInterconnectsElements.ID_ACTIVITY_MESSAGE % message) + \"/td[4]/div[2]\"\n timeago = (ui_lib.get_text(time, 10, hidden_element=True)).split()\n logger.info(\"Event found at - %s\" % timeago)\n\n if timeago:\n if timeago[1].lower() == \"hours\" and int(timeago[0]) > 2:\n logger.warn(\n \"Expected message %s found is not within last 2 hours!! Discarding Old activity Message\" % message)\n continue\n elif timeago[1].lower() in (\"months\", \"year\", \"years\"):\n logger.warn(\"Expected message '{}' found is of '{}'!!\".format(message, timeago))\n continue\n found += 1\n logger.debug(\"\\nActivity : '%s' found in IC activity page\" % message)\n else:\n logger.info(\"\\nMessage %s is found but testscript failed to extract exact time\" % message)\n continue\n\n else:\n logger.warn(\"Expected message '%s' is not found in activity page:\" % message)\n\n if found == num_message:\n logger.debug(\"All the excepted messages found in LI activity page\")\n return True\n else:\n logger.debug(\"All the excepted messages are not found in LI activity page\")\n return False",
"def verify_chain():\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n if block['previous_hash'] != hash_block(blockchain[index - 1]):\n return False\n # Here [:-1] excludes the reward from being a part of validation\n if not valid_proof(block['transactions'][:-1], block['previous_hash'], block['proof']):\n print('Proof of work is invalid.')\n return False\n return True",
"def is_done_in_the_past(self):\n return any(self.hash == rec['hash'] for rec in self.records)",
"def test_archived_iterate_last_checked(self):\n old_last_check = self.alice_inbox.last_checked\n # Emulate iterate\n messages = list(self.alice_inbox.archived)\n self.assertEqual(self.alice_inbox.last_checked, old_last_check)",
"def isInCall(self):\n if len(self.partyList) == 0:\n return False, False\n established_parties = [party for party in self.partyList if party.State == PartyState.Established]\n return True, len(established_parties) > 0",
"def is_activity_completed(block_id, activities):\n for activity in activities:\n if block_id == activity.block_id:\n return 'completed'\n\n return 'not_completed'",
"def test_alice_sent(self):\n messages = list(self.alice_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.alice_message_to_bob, messages)",
"def verify_chain():\n\n block_index = 0\n is_unchanged = True\n\n if namoto_length < 1:\n print('Blockchain is empty!')\n return None\n\n for block in namoto_blockchain:\n\n if block[0] == namoto_blockchain[block_index -1]:\n is_unchanged = True\n block_index += 1\n\n else:\n is_unchanged = False\n break\n\n return is_unchanged",
"def new_recv_block(recv_block: Block, sender_id: Optional[int] = None, mute: bool = False) -> bool:\n logging.debug(\"Received block %s\", util.bintos(recv_block.current_hash))\n if not recv_block.verify():\n logging.debug(\"Block %s rejected (failed verification)\",\n util.bintos(recv_block.current_hash))\n return False\n\n r = util.get_db()\n with r.lock(\"blockchain:blocks:lock\"), \\\n r.lock(\"blockchain:last_block:lock\"), \\\n r.lock(\"blockchain:main_branch:lock\"), \\\n r.lock(\"blockchain:orphan_blocks:lock\"), \\\n r.lock(\"blockchain:tx_pool:lock\"), \\\n r.lock(\"blockchain:utxo-block:lock\"), \\\n r.lock(\"blockchain:utxo-tx:lock\"):\n\n # NOTE: Comments like the one below are references to the bitcoin\n # protocol rules\n # OK 2 Reject if duplicate of block we have in any of the three categories\n if r.hexists(\"blockchain:blocks\", recv_block.current_hash) or \\\n r.sismember(\"blockchain:orphan_blocks:\".encode() + recv_block.previous_hash,\n recv_block.dumpb()):\n logging.debug(\"Block %s rejected (already exists)\",\n util.bintos(recv_block.current_hash))\n return False\n\n # Handle the genesis block\n if recv_block.is_genesis():\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n t = recv_block.transactions[0]\n o = t.outputs[0]\n ib = TransactionInput(t.id, o.index).dumpb()\n ob = o.dumpb()\n r.hset(\"blockchain:utxo-block:\".encode() + recv_block.current_hash, ib, ob)\n r.hset(\"blockchain:utxo-tx\", ib, ob)\n r.sadd(\"blockchain:main_branch\", recv_block.current_hash)\n _set_last_block_unlocked(r, recv_block)\n logging.debug(\"Genesis block accepted\")\n return True\n\n # OK 11 Check if prev block (matching prev hash) is in main branch or side branches. If not,\n # add this to orphan blocks, then query peer we got this from for 1st missing orphan\n # block in prev chain; done with block\n prev_blockb = r.hget(\"blockchain:blocks\", recv_block.previous_hash)\n if prev_blockb is None:\n logging.debug(\"Block %s is orphan\", util.bintos(recv_block.current_hash))\n r.sadd(\"blockchain:orphan_blocks:\".encode() + recv_block.previous_hash,\n recv_block.dumpb())\n # TODO OPT: Unlock before requesting the block (it could take some time, although\n # the response is asynchronous of course\n if not mute:\n logging.debug(\"Requesting block %s\", util.bintos(recv_block.previous_hash))\n # TODO OPT: Only ask the node we got this from, not everyone, to\n # avoid the flood of incoming blocks later\n chatter.get_blockid(recv_block.previous_hash,\n [sender_id] if sender_id is not None else util.get_peer_ids())\n return False\n\n prev_block = Block.loadb(prev_blockb)\n logging.debug(\"Previous block %s\", util.bintos(prev_block.current_hash))\n if recv_block.index != prev_block.index + 1:\n logging.debug(\"Block %s rejected (wrong index)\", util.bintos(recv_block.current_hash))\n return False\n\n # OK 15 Add block into the tree. There are three cases: 1. block further extends the main\n # branch; 2. block extends a side branch but does not add enough difficulty to make\n # it become the new main branch; 3. 
block extends a side branch and makes it the new\n # main branch.\n last_block = get_block()\n if recv_block.previous_hash == last_block.current_hash:\n # OK Case 1 (b.previous_hash == last_block):\n logging.debug(\"Block %s extends the main branch\", util.bintos(recv_block.current_hash))\n txos = _validate_block_unlocked(r, recv_block)\n if txos is None:\n return False\n referenced_txos, new_utxos = txos\n \"\"\"\n # NOTE: This is the body of _validate_block_unlocked, annotated, for reference\n referenced_txos: Set[bytes] = set() # the utxos from UTXO-block spent in recv_block\n new_utxos: Dict[bytes, bytes] = {}\n # OK 1 For all but the coinbase transaction, apply the following:\n for t in recv_block.transactions:\n # OK 1 For each input, look in the main branch to find the referenced output\n # transaction. Reject if the output transaction is missing for any input.\n input_amount = 0.0\n for i in t.inputs:\n # Search for i in UTXO-block\n ib = i.dumpb()\n ob = r.hget(\"blockchain:utxo-block:\".encode() + recv_block.previous_hash, ib)\n if ob is None:\n # Not found in UTXO-block, search in new_utxos\n ob = new_utxos.get(ib)\n if ob is None:\n return False\n del new_utxos[ib]\n else:\n # Avoid double-spending of a utxo from UTXO-block in the block\n if ib in referenced_txos:\n return False\n referenced_txos.add(ib)\n o = TransactionOutput.loadb(ob)\n # OK 2 For each input, if we are using the nth output of the earlier transaction,\n # but it has fewer than n+1 outputs, reject.\n # OK 4 Verify crypto signatures for each input; reject if any are bad\n if o.recipient != t.sender:\n return False\n # OK 5 For each input, if the referenced output has already been spent by a\n # transaction in the main branch, reject\n # OK 7 Reject if the sum of input values < sum of output values\n input_amount += o.amount\n if input_amount != sum(o.amount for o in t.outputs):\n return False\n\n new_utxos.update({TransactionInput(t.id, o.index).dumpb(): o.dumpb() \\\n for o in t.outputs})\n \"\"\"\n\n # OK 4 For each transaction, \"Add to wallet if mine\"\n # NOTE: referenced_txos and new_utxos are not empty since we got here\n _create_utxo_block_unlocked(r, recv_block, referenced_txos, new_utxos)\n\n # OK 5 For each transaction in the block, delete any matching transaction from the pool\n # : of the transactions in the pool, keep only the ones that are valid using the\n # new utxo-block to check for validity\n tx_pool = {t.id: t for t in \\\n [Transaction.loadb(tb) for tb in r.hvals(\"blockchain:tx_pool\")]}\n # NOTE: There can't be double spending in the tx pool as it is now\n tx_pool = _rebuild_tx_pool_unlocked(r, tx_pool, recv_block)\n\n _rebuild_utxo_tx_unlocked(r, recv_block, tx_pool)\n\n # Add block to main branch\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n r.sadd(\"blockchain:main_branch\", recv_block.current_hash)\n\n _set_last_block_unlocked(r, recv_block)\n logging.debug(\"Block %s accepted\", util.bintos(recv_block.current_hash))\n elif recv_block.index <= last_block.index:\n # OK Case 2 (b.previous_hash != last_block && b.index <= last_block.index)\n # : Add it without doing any validation because validating this now would require a lot\n # of work (actually simulating adding this to its prev as if extending the main branch).\n logging.debug(\"Block %s extends a side branch (not changing main)\",\n util.bintos(recv_block.current_hash))\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n else:\n # OK Case 3 (b.previous_hash != last_block 
&& b.index > last_block.index)\n # OK 1 Find the fork block on the main branch which this side branch forks off of\n # : Ascend the side branch, the fork block is the first to be in the main branch\n logging.debug(\"Block %s extends a side branch (changing main)\",\n util.bintos(recv_block.current_hash))\n old_side_branch = [recv_block] # the Blocks in the old side branch\n fork_block = Block.loadb(r.hget(\"blockchain:blocks\", recv_block.previous_hash))\n while not r.sismember(\"blockchain:main_branch\", fork_block.current_hash):\n old_side_branch.append(fork_block)\n fork_block = Block.loadb(r.hget(\"blockchain:blocks\", fork_block.previous_hash))\n old_side_branch.reverse() # starting from the child of the fork block\n # OK 2 Redefine the main branch to only go up to this fork block\n # : Ascend from last_block up to the fork block\n old_main_branch: List[Block] = [] # the Blocks in the old main branch\n b = Block.loadb(r.hget(\"blockchain:blocks\", last_block.current_hash))\n while b != fork_block:\n old_main_branch.append(b)\n b = Block.loadb(r.hget(\"blockchain:blocks\", b.previous_hash))\n old_main_branch.reverse() # starting from the child of the fork block\n logging.debug(\"Fork block %s\", util.bintos(fork_block.current_hash))\n # OK 3 For each block on the side branch, from the child of the fork block to the leaf,\n # add to the main branch:\n for osbi, b in enumerate(old_side_branch):\n # OK 1 Do \"branch\" checks 3-11\n # : Why? we did them when first receiving the block. What could have changed?\n # OK 2 For all the transactions:\n txos = _validate_block_unlocked(r, b)\n if txos is None:\n # Delete invalid blocks and abort\n invalid_ids = [invalid.current_hash for invalid in old_side_branch[osbi:]]\n r.hdel(\"blockchain:blocks\", *invalid_ids)\n return False\n referenced_txos, new_utxos = txos\n \"\"\"\n # NOTE: This is the body of _validate_block_unlocked, annotated, for reference\n referenced_txos: Set[bytes] = set() # the utxos from UTXO-block spent in b\n new_utxos: Dict[bytes, bytes] = {}\n for t in b.transactions:\n # WP 1 For each input, look in the main branch to find the referenced output\n # transaction. 
Reject if the output transaction is missing for any input.\n # : Search for the referenced outputs in UTXO-block[previous_hash]\n input_amount = 0.0\n for i in t.inputs:\n # Search for i in UTXO-block\n ib = i.dumpb()\n ob = r.hget(\"blockchain:utxo-block:\".encode() + b.previous_hash, ib)\n if ob is None:\n # Not found in UTXO-block, search in new_utxos\n ob = new_utxos.get(ib)\n if ob is None:\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n del new_utxos[ib]\n else:\n # Avoid double-spending in the block\n if ib in referenced_txos:\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n referenced_txos.add(ib)\n o = TransactionOutput.loadb(ob)\n # OK 2 For each input, if we are using the nth output of the earlier\n # transaction, but it has fewer than n+1 outputs, reject.\n # WP 4 Verify crypto signatures for each input; reject if any are bad\n # : Check that t.sender == o.recipient for each utxo referenced\n if o.recipient != t.sender:\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n # OK 5 For each input, if the referenced output has already been spent by a\n # transaction in the main branch, reject\n # WP 7 Reject if the sum of input values < sum of output values\n # : Check that sum(inputs) == sum(outputs)\n input_amount += o.amount\n if input_amount != sum(o.amount for o in t.outputs):\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n\n new_utxos.update({TransactionInput(t.id, o.index).dumpb(): o.dumpb() for o \\\n in t.outputs})\n \"\"\"\n\n # OK 5 For each transaction, \"Add to wallet if mine\"\n # NOTE: referenced_txos and new_utxos are not empty since we got here\n _create_utxo_block_unlocked(r, b, referenced_txos, new_utxos)\n\n # OK 5 For each block in the old main branch, from the leaf down to the child of the\n # fork block:\n tx_pool = {t.id: t for t in \\\n [Transaction.loadb(tb) for tb in r.hvals(\"blockchain:tx_pool\")]}\n for b in reversed(old_main_branch):\n # OK 1 For each non-coinbase transaction in the block:\n for t in b.transactions:\n # OK 1 Apply \"tx\" checks 2-9, except in step 8, only look in the transaction\n # pool for duplicates, not the main branch\n # : Why? these have been checked already. 
There can't be double spending\n # transactions in the pool as it is at this point (current as of the old\n # main branch) + the old main branch, because they wouldn't have gotten\n # there in the first place.\n # OK 2 Add to transaction pool if accepted, else go on to next transaction\n tx_pool[t.id] = t\n\n # OK 6 For each block in the new main branch, from the child of the fork node to the\n # leaf:\n # OK 1 For each transaction in the block, delete any matching transaction from the\n # transaction pool\n # : Of the transactions in the pool, keep only the ones that are valid using the\n # new utxo-block to check for validity\n # NOTE: There can't be double spending in the tx pool as it is now,\n # because it consists of the tx in the previous tx pool and all the\n # tx in the old main branch, and all of these have already been\n # checked for double spending\n tx_pool = _rebuild_tx_pool_unlocked(r, tx_pool, recv_block)\n\n _rebuild_utxo_tx_unlocked(r, recv_block, tx_pool)\n\n # Update main_branch\n for b in old_main_branch:\n r.srem(\"blockchain:main_branch\", b.current_hash)\n for b in old_side_branch:\n r.sadd(\"blockchain:main_branch\", b.current_hash)\n\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n _set_last_block_unlocked(r, recv_block)\n logging.debug(\"Block %s accepted\", util.bintos(recv_block.current_hash))\n\n orphans = [Block.loadb(orphanb) for orphanb in \\\n r.smembers(\"blockchain:orphan_blocks:\".encode() + recv_block.current_hash)]\n r.delete(\"blockchain:orphan_blocks:\".encode() + recv_block.current_hash)\n\n logging.debug(\"Block time for %s %f\", util.bintos(recv_block.current_hash),\n time.time() - recv_block.timestamp)\n\n # OK 19 For each orphan block for which this block is its prev, run all these steps (including\n # this one) recursively on that orphan\n for orphan in orphans:\n new_recv_block(orphan, sender_id)\n\n _check_for_new_block()\n return True",
"async def verify(self,ctx,ign='',region=''):\r\n if ign =='' or region =='':\r\n await self.bot.say(\"Please type in a ign and region.\")\r\n return\r\n if not ctx.message.channel.is_private: #Makes sure channel is private\r\n await self.bot.say('Sorry. But this process must be done in a private message, to continue please dm the bot ```{}```'.format(ctx.message.content))\r\n return\r\n try:\r\n pattern = verify.start(ctx.message.author.id, ign,region)\r\n except Exception as e:\r\n await self.bot.say('Error: ' + str(e)+'\\n\\nJoin http://discord.me for more info.')\r\n return\r\n pattern_ = '{} Halcyon Potions, {} Weapon Infusions, and {} Crystal Infusions'.format(str(pattern.count(0)), str(pattern.count(1)), str(pattern.count(2)))\r\n await self.bot.say(\"Awesome. To complete the authorization process.\\n• Enter a **blitz** match\\n• Buy **{}** for your first {} items.\\n• **You can sell them immediately at the same price.**\\n• This must be your next match.\\n• **Once you are done please type {}check to complete authorization process.** Once this is done, your account will be linked and authenticated permanantly.\".format(pattern_,len(pattern), self.bot.command_prefix[0]))\r\n\r\n await asyncio.sleep(345)\r\n\r\n await self.bot.send_message(ctx.message.author, verify.check(ctx.message.author.id))"
] | [
"0.61076456",
"0.5824365",
"0.5646299",
"0.5420734",
"0.5327307",
"0.5240005",
"0.5224036",
"0.5191901",
"0.51852673",
"0.5120963",
"0.51135236",
"0.51021546",
"0.506925",
"0.5056993",
"0.50302124",
"0.49842632",
"0.4932219",
"0.4904889",
"0.49018604",
"0.48957723",
"0.48712662",
"0.48627266",
"0.48447913",
"0.48159567",
"0.48108524",
"0.4792894",
"0.47898042",
"0.4774204",
"0.4762981",
"0.47568634"
] | 0.87524366 | 0 |
Mine count blocks and return the new tip. | def mine_blocks(self, count):
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)
return int(self.nodes[0].getbestblockhash(), 16) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_blocks(self): # -> int:\n ...",
"def getblocknumber(self):\n return self.getblockcount()",
"def calculate_tip(meal_base, tip_rate):",
"def getblockcount(self):\n return self.proxy.getblockcount()",
"def parent_block_count(self):\n return self.block_count",
"def parent_block_count(self):\n raise NotImplementedError()",
"def get_newest_blocks(self, count: int) -> tuple[list[Block], bool]:\n raise NotImplementedError",
"def mine(self):\n if not self.unconfirmed_transactions: \n return False\n \n last_block = self.last_block\n \n new_block = Block(index= last_block.index + 1, \n transactions = self.unconfirmed_transactions,\n timestamp = time.time(),\n previous_hash = last_block.hash)\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n self.unconfirmed_transactions = []\n return new_block.index",
"def setMine(self):\n self.count = 13\n self.mine = True",
"def tip_calulator(total, people, tip):\n tip = tip / 100\n total = total / people\n tip_amount = total * tip\n new_total = total + tip_amount\n\n return tip_amount, new_total\n # pass",
"def getblockcount(self):\n return len(self.blocks) - 1",
"def mineBlock(self, difficulty):\n print ( \"\\n[Status] Mining block (\" + str(self.index) + \") with PoW ...\")\n startTime = time.time()\n\n while self.hash[:difficulty] != \"0\"*difficulty:\n self.nonce += 1\n self.hash = self.calculateHash()\n\n endTime = time.time()\n print ( \"[ Info ] Time Elapsed : \" + str(endTime - startTime) + \" seconds.\")\n print ( \"[ Info ] Mined Hash : \" + self.hash)\n print (Style.RESET_ALL)",
"def block_count(self):\n\n data = {\"action\" : \"block_count\"}\n\n return rpc_request(self.uri, data)",
"def total_blocks(*args, **kwargs): # real signature unknown\n pass",
"def retrieve_new_blocks_since(number_of_last_sent_block, web3):\n new_blocks = []\n number_of_last_block = web3.eth.getBlock('latest').number\n if number_of_last_block > number_of_last_sent_block:\n number_of_blocks_to_send = number_of_last_block - number_of_last_sent_block\n for i in range(1, number_of_blocks_to_send + 1):\n new_blocks.append(web3.eth.getBlock(number_of_last_sent_block + i))\n return number_of_last_block, new_blocks\n else:\n return number_of_last_sent_block, new_blocks",
"def decorate_numtips(tree):\r\n\r\n # iterate over tree and give the score for each node\r\n for n in tree.postorder(include_self=True):\r\n if n.istip():\r\n # if the node is a tip then the number of children is 1\r\n n.Score = 1\r\n else:\r\n # if the node is not a tip then get the number of children\r\n n.Score = len(n.tips())\r\n\r\n return tree",
"def mine(self):\n last_block = self.chain[-1]\n\n nonce = self.proof_of_work()\n previous_hash = self.hash(last_block)\n self.create_block(nonce, previous_hash)",
"def new_block(self, proof, previous_hash=None):\n servers = [\n \"1.us.pool.ntp.org\",\n \"2.us.pool.ntp.org\",\n \"3.us.pool.ntp.org\"\n ]\n\n response = {}\n\n try:\n response = self.c.request('0.us.pool.ntp.org')\n except Exception:\n for server in servers:\n try:\n response = self.c.request(server)\n\n if response:\n break\n\n except Exception:\n print('\\n //// alternate ntp server didnt work')\n\n block = {\n 'message': 'New Block Forged',\n 'index': len(self.chain) + 1,\n 'timestamp': response.tx_time or time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.chain[-1]['hash'],\n }\n\n # Calculate the hash of this new Block\n block['hash'] = self.hash(block)\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block",
"def rpc_getblockcount(self) -> int:\n return self._call_command([\"getblockcount\"])",
"def get_block_count(self, ticker: str) -> int:\n result = self.rpc_getblockcount()\n return int(result)",
"def mine(self):\n new_block = Block(self.block['timestamp'], self.block['car'],\n self.block['id'])\n # link the block to the previous block\n new_block.previous_hash = self._get_previous_hash()\n while True:\n # get a hash\n new_hash = new_block.get_hash()\n # check hash rules, in our case check if the hash starts with\n # self.difficulty number of zeroes\n if new_hash[0] != self.difficulty * \"0\":\n if self.new_block[\"block\"] is None:\n # the hash hasn't been found yet by any other process,\n # therefore increase the nonce and continue\n # miners will use a different mining mechanism in order\n # to increase the probability of finding a hash by\n # a different miner\n new_block.increment_nonce(self.id + 1)\n continue\n break\n break\n\n # NOTE: May happen that two processes find the hash at the same time,\n # because there is not a big difficulty, however, it's not a problem,\n # for sake of the demo it's fine\n\n if self.new_block[\"block\"] is None:\n # this process has found the hash first\n print(self.id, \" - the winner hash\", new_hash)\n new_block.hash = new_hash\n self.new_block[\"block\"] = new_block\n print(self.id, \" - mined the block\")\n else:\n # validate the block found by other process (miner)\n if self.new_block[\"validated\"] is not False:\n print(self.id, \" - validating\")\n # check block's validity\n valid = False\n if self.new_block[\"block\"].is_block_valid():\n # check blockchain's validity when we apply the newly\n # mined block\n if self.is_blockchain_valid(self.new_block[\"block\"]):\n valid = True\n self.new_block[\"validated\"] = valid\n else:\n # NOTE: this demo doesn't take into account the number of\n # miners who approved the block, the block will be rejected\n # if any of them rejected it\n # but usually just more than 50% of miners must approve\n print(self.id, \" - the block has been rejected by other miner\")",
"def block(self):\n return randint(0, self.max_block)",
"def adjust_difficulty(last_block, new_timestamp):\n if(new_timestamp - last_block.timestamp) < MINE_RATE:\n return last_block.difficulty + 1\n if(last_block.difficulty - 1) > 0:\n return last_block.difficulty - 1\n return 1",
"def mine(self):\n # Checking if there is anything to be mined \n if len(self.pool) > 0:\n # Getting data from the pools list and removing it from the list\n data = self.pool.pop()\n # Instantiating the block with the given data and hash of the last block in the blocks list\n block = Block(data, self.blocks[-1].hash)\n # mining the block on the given difficulty level\n block.mine(self.difficulty)\n # Adding the block to the chain\n self.add_to_chain(block)\n # Showing block details\n self.verbose(block)",
"def miner_lock_blocks(self) -> int:",
"def mine_block(previous: bytes, height: int, miner: bytes, transactions: List[Transaction], timestamp: int, difficulty: int, cutoff_time: int) -> 'Block':\n print('============================ \\n \\n \\n ============================')\n manager = mp.Manager()\n final_nonce = manager.Value('i', None)\n # Declare a new instance of a block, and change the nonce until it produces a correct block_id\n\n block = Block(miner=miner, transactions=transactions, timestamp=timestamp,\n block_id=None, nonce=0, previous=previous, height=height, difficulty=difficulty)\n processes: List[Process] = []\n\n found_event = Event()\n for _ in range(processors):\n process = Process(target=_mine_block, args=(\n block, final_nonce, found_event, cutoff_time))\n processes.append(process)\n\n for process in processes:\n\n process.start()\n\n found_event.wait()\n\n for process in processes:\n process.terminate()\n\n for process in processes:\n process.join()\n print('============= END =============== \\n \\n \\n ============================')\n if final_nonce.value is not None:\n block.nonce = final_nonce.value\n block.block_id = block.compute_block_id()\n return block\n else:\n return None",
"def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)",
"def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value",
"def getNum(self):\r\n return self.blockNum",
"def mine(self):\n if self.unconfirmed_transactions == []:\n return False\n\n transactions = self.unconfirmed_transactions\n for transaction in transactions:\n author = transaction['author']\n public_key_path = author + '_public.pem'\n content = transaction['content']\n signature = transaction['signature']\n verify = rsa_verify(content, signature, public_key_path)\n if verify == False:\n print('Transaction not verified.')\n return \n previous_block = self.last_block\n last_index = previous_block.index\n\n index = last_index + 1\n timestamp = time.time()\n previous_hash = previous_block.hash\n\n newblock = Block(index=index, transactions=transactions, timestamp=timestamp, previous_hash=previous_hash)\n proof = Blockchain.proof_of_work(newblock)\n\n self.add_block(newblock, proof)\n self.unconfirmed_transactions = []\n return newblock.index"
] | [
"0.5717195",
"0.5667572",
"0.56337124",
"0.5575411",
"0.5538822",
"0.55174536",
"0.54982716",
"0.5473228",
"0.53758806",
"0.5342303",
"0.53158927",
"0.5282329",
"0.521116",
"0.52065545",
"0.52049637",
"0.5188872",
"0.5170571",
"0.5148232",
"0.5138396",
"0.50767654",
"0.5072963",
"0.50619954",
"0.5041602",
"0.5041206",
"0.5036822",
"0.5015922",
"0.49920994",
"0.49726182",
"0.49505484",
"0.4924022"
] | 0.6711444 | 0 |
Mine a reorg that invalidates length blocks (replacing them with length+1 blocks). | def mine_reorg(self, length):
# make sure all invalidated blocks are node0's
self.generatetoaddress(self.nodes[0], length, self.nodes[0].get_deterministic_priv_key().address)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.generatetoaddress(self.nodes[1], length + 1, self.nodes[1].get_deterministic_priv_key().address) # Must be longer than the orig chain
return [int(x, 16) for x in all_hashes] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simple_reorg(self, height, shift=0):\n hashes = []\n fee_delta = 1000000\n orig_len = self.rpc.getblockcount()\n old_hash = self.rpc.getblockhash(height)\n if height + shift > orig_len:\n final_len = height + shift\n else:\n final_len = 1 + orig_len\n # TODO: raise error for insane args?\n\n self.rpc.invalidateblock(old_hash)\n self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'\n .format(height))\n memp = self.rpc.getrawmempool()\n\n if shift == 0:\n hashes += self.generate_block(1 + final_len - height)\n else:\n for txid in memp:\n # lower priority (to effective feerate=0) so they are not mined\n self.rpc.prioritisetransaction(txid, None, -fee_delta)\n hashes += self.generate_block(shift)\n\n for txid in memp:\n # restore priority so they are mined\n self.rpc.prioritisetransaction(txid, None, fee_delta)\n hashes += self.generate_block(1 + final_len - (height + shift))\n self.wait_for_log(r'UpdateTip: new best=.* height={}'\n .format(final_len))\n return hashes",
"def rescanblockchain(self, *args, **kwargs):\n pass",
"def mutate_seq(seq, block0, RNs):\n sequence = seq\n block = block0\n # get the number of changes in the FWR part and key part\n # for framework part, include the rate of silent mutations (75%), this\n # is not necessary for the explicitly modeled residues as changes there\n # can lead to replacement with the same AA still\n FWR_changes = np.random.binomial(cf.lAb, cf.p_err_FWR*0.75)\n CDR_changes = np.random.binomial(cf.nkey, cf.p_err_CDR)\n if FWR_changes > 0:\n # determine number of deadly muts and blockmuts in the non-death\n # branch (p_death + (1-p_death)*p_block + (1-p_death)*(1-p_block)=1)\n # 0 signifies deathly mutation, 1 signifies blocking mutation\n mutIDs = list(np.random.choice([0, 1, 2],\n p=[cf.p_death_FWR,\n (1-cf.p_death_FWR) * cf.p_block_FWR,\n (1-cf.p_death_FWR) *\n (1-cf.p_block_FWR)],\n size=FWR_changes))\n\n if 0 in mutIDs: # if deadly mutations happen, return no sequence\n return None, 0, 0\n elif 1 in mutIDs: # if block mutation happens, set block to true\n block = True\n # if the cell has not died yet, analyse mutations in the CDR region\n if CDR_changes > 0:\n # get non-repetitive positions where mutation will be attempted\n changepos = random.sample(range(cf.nkey), CDR_changes)\n for pos in changepos:\n # get transition probabilities for the current amino acid\n cumprob = np.cumsum(cf.tp20[sequence[pos] - 1])\n randi = RNs.getR()\n # find replacement codon\n for i in range(21): # 20 aa plus stop\n if randi < cumprob[i]:\n sequence[pos] = i + 1\n break\n # if stop codon was integrated into the sequence, return 0 as well\n if 21 in sequence:\n return None, 0, 0\n # only mutations of cells that survived are returnd for the counting\n return sequence, FWR_changes, block",
"def mine(self):\n last_block = self.chain[-1]\n\n nonce = self.proof_of_work()\n previous_hash = self.hash(last_block)\n self.create_block(nonce, previous_hash)",
"def test_replace_chain_keep_original(self):\n import copy\n miner_address = 'miner_address'\n\n blockchain1 = Blockchain()\n blockchain1.mine(miner_address)\n\n blockchain2 = copy.deepcopy(blockchain1)\n blockchain1.mine(miner_address)\n\n # Now let's make sure that each blockchain has its own number of blocks\n self.assertEqual(3, len(blockchain1.full_chain))\n self.assertEqual(2, len(blockchain2.full_chain))\n\n # Then let's replace blockchain1 with blockchain2\n blockchain1.replace_chain(blockchain2.full_chain)\n\n self.assertEqual(3, len(blockchain1.full_chain))\n self.assertEqual(2, len(blockchain2.full_chain))",
"def _extend(self, newlen: int) -> None:\n diff = newlen - len(self)\n if diff > 0:\n self.extend([0] * diff)",
"def grow(self):\n self.starve = 500 # useful to avoid looping AI snakes (they die younger -> bad fitness)\n self.body.append(self.old_tail) # that's why I keep old_tail",
"def shrink_offset_pairs(self):\n\n def int_from_block(i):\n u, v = self.blocks[i].bounds\n block_bytes = self.shrink_target.buffer[u:v]\n return int_from_bytes(block_bytes)\n\n def block_len(i):\n return self.blocks[i].length\n\n # Try reoffseting every pair\n def reoffset_pair(pair, o):\n n = len(self.blocks)\n # Number of blocks may have changed, need to validate\n valid_pair = [\n p\n for p in pair\n if p < n and int_from_block(p) > 0 and self.is_payload_block(p)\n ]\n\n if len(valid_pair) < 2:\n return\n\n m = min([int_from_block(p) for p in valid_pair])\n\n new_blocks = [\n self.shrink_target.buffer[u:v]\n for u, v in self.shrink_target.all_block_bounds()\n ]\n for i in valid_pair:\n new_blocks[i] = int_to_bytes(int_from_block(i) + o - m, block_len(i))\n buffer = hbytes().join(new_blocks)\n return self.incorporate_new_buffer(buffer)\n\n def is_non_zero_payload(block):\n return not block.all_zero and self.is_payload_block(block.index)\n\n for block_i, block_j in self.each_pair_of_blocks(\n is_non_zero_payload, is_non_zero_payload\n ):\n i = block_i.index\n j = block_j.index\n\n value_i = int_from_block(i)\n value_j = int_from_block(j)\n\n offset = min(value_i, value_j)\n Integer.shrink(\n offset, lambda o: reoffset_pair((i, j), o), random=self.random\n )",
"def remove(self, pos, length):\n if pos in self.removals:\n self.removals[pos] += length\n else:\n self.removals[pos] = length",
"def update_lengths(lengths, eoses, idx):\n # If a length is 0 it has never had a length set so it is eligible to have\n # this EOS be the length.\n updatable_lengths = (lengths == 0)\n # If this length can be updated AND this token is an eos\n lengths_mask = updatable_lengths & eoses\n return lengths.masked_fill(lengths_mask, idx)",
"def block_program(description):\n\n def run(self):\n n = len(description)\n i = 0\n while i + n <= len(self.shrink_target.blocks):\n attempt = bytearray(self.shrink_target.buffer)\n failed = False\n for k, d in reversed(list(enumerate(description))):\n j = i + k\n u, v = self.blocks[j].bounds\n if d == \"-\":\n value = int_from_bytes(attempt[u:v])\n if value == 0:\n failed = True\n break\n else:\n attempt[u:v] = int_to_bytes(value - 1, v - u)\n elif d == \"X\":\n del attempt[u:v]\n else: # pragma: no cover\n assert False, \"Unrecognised command %r\" % (d,)\n if failed or not self.incorporate_new_buffer(attempt):\n i += 1\n\n run.command = description\n run.__name__ = \"block_program(%r)\" % (description,)\n return run",
"def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()",
"def test_replace_chain(self):\n import copy\n miner_address = 'miner_address'\n\n blockchain1 = Blockchain()\n blockchain1.mine(miner_address)\n\n blockchain2 = copy.deepcopy(blockchain1)\n blockchain2.mine(miner_address)\n\n # Now let's make sure that each blockchain has its own number of blocks\n self.assertEqual(2, len(blockchain1.full_chain))\n self.assertEqual(3, len(blockchain2.full_chain))\n\n # Then let's replace blockchain1 with blockchain2\n blockchain1.replace_chain(blockchain2.full_chain)\n\n self.assertEqual(3, len(blockchain1.full_chain))\n self.assertEqual(3, len(blockchain2.full_chain))\n self.assertEqual(blockchain1.last_block.hash, blockchain2.last_block.hash)",
"def mine_blocks(self, count):\n\n # Clear out block announcements from each p2p listener\n [x.clear_block_announcements() for x in self.nodes[0].p2ps]\n self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)\n return int(self.nodes[0].getbestblockhash(), 16)",
"def example_deletion_with_block_lowering(self):\n i = 0\n while i < len(self.shrink_target.blocks):\n if not self.is_shrinking_block(i):\n i += 1\n continue\n\n u, v = self.blocks[i].bounds\n\n j = 0\n while j < len(self.shrink_target.examples):\n n = int_from_bytes(self.shrink_target.buffer[u:v])\n if n == 0:\n break\n ex = self.shrink_target.examples[j]\n if ex.start < v or ex.length == 0:\n j += 1\n continue\n\n buf = bytearray(self.shrink_target.buffer)\n buf[u:v] = int_to_bytes(n - 1, v - u)\n del buf[ex.start : ex.end]\n if not self.incorporate_new_buffer(buf):\n j += 1\n\n i += 1",
"def invalidate(self):\n self._compute_merkle_tree()\n self.block_header.merkle_root_hash = self.merkle_tree.hash",
"def testCheckBlocksFitLength_TooFewBlocks(self):\n self.assertRaises(PayloadError,\n checker.PayloadChecker._CheckBlocksFitLength,\n 64, 3, 16, 'foo')\n self.assertRaises(PayloadError,\n checker.PayloadChecker._CheckBlocksFitLength,\n 60, 3, 16, 'foo')\n self.assertRaises(PayloadError,\n checker.PayloadChecker._CheckBlocksFitLength,\n 49, 3, 16, 'foo')\n self.assertRaises(PayloadError,\n checker.PayloadChecker._CheckBlocksFitLength,\n 48, 2, 16, 'foo')",
"def build(self, block_size):",
"def mine(self):\n if not self.unconfirmed_transactions: \n return False\n \n last_block = self.last_block\n \n new_block = Block(index= last_block.index + 1, \n transactions = self.unconfirmed_transactions,\n timestamp = time.time(),\n previous_hash = last_block.hash)\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n self.unconfirmed_transactions = []\n return new_block.index",
"def resize_invalid_genes(self):\n pass",
"def extend_seq(mrnaseq, mrna_frag, total_length=50):\n #\n # Prepare sequences with no gaps\n #\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n #\n # check if the sequence is shorter\n #\n if len(mrna_frag_nogap) > total_length:\n syserr(\"mrnaseq_nogap: \", mrnaseq_nogap)\n syserr(\"mrna_frag_nogap: \", mrna_frag_nogap)\n syserr(\"mrnaseq: \", mrnaseq)\n syserr(\"mrna_frag: \", mrna_frag)\n raise Exception(\n \"Check your sequences maybe you should shrink, not extend them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean([i for i, x in enumerate(mrna_frag) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the extension of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length > total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = total_length - length\n quot = dif // 2 # this is explicit integer division\n l_ext = li - quot # TODO check if they are not lower than 0\n u_ext = ui + (dif - quot)\n if (l_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # extend left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n if (li - dif < 0):\n return mrnaseq_nogap[:ui + abs(li - dif)]\n else:\n return mrnaseq_nogap[li - dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n # if there is noting to extend to the right\n if ui + dif > len(mrnaseq_nogap):\n return mrnaseq_nogap[li - ((ui + dif) - len(mrnaseq_nogap)):]\n else:\n return mrnaseq_nogap[li:ui + dif]",
"def _maskhg19(self):\n if len(self._current_block) > 2:\n self._current_block[0].text = self._current_block[1].text\n self._current_block[0].size = self._current_block[1].size\n self._current_block[0].setstring()\n self._current_block.remove(self._current_block[1])\n else:\n self._current_block = []",
"def run_hash(input, lengths, repeat):\n position = 0\n skip = 0\n for times in range(repeat):\n for length in lengths:\n if position + length < len(input):\n input[position:position + length] = reversed(input[position:position + length])\n else:\n # Transform the circular list to be reversed into a linear one and reverse it\n reversed_list = list(reversed(input[position:] + input[:(position + length) % len(input)]))\n input[position:] = reversed_list[:len(input) - position]\n input[:(position + length) % len(input)] = reversed_list[len(input) - position:]\n\n position = (position + length + skip) % len(input)\n skip += 1\n return input",
"def grow(self):\n \n self.body.append(self.body[-1])",
"def fill_cells_to_the_left(nonogram, row, col):\n sth_changed = False\n\n # leeway stores a number of fillable cells to the right\n # -1 at the end returns length of line, when there is no true empty cell\n right_cells = nonogram.data.get_row(row)[col+1:]\n leeway = (right_cells + [-1]).index(-1)\n\n block_length = find_min_block_length(nonogram, row, col)\n\n # filling cells enforced by minimal block length\n for position in range(col + leeway + 1 - block_length, col):\n nonogram.fill_cell(row, position, 1)\n sth_changed = True\n\n return sth_changed",
"def correct_length_of_all_items(self):\n if self.need_correct_length:\n if not is_power_of_2(self.max_length) and self.length_is_power_of_2:\n self.max_length = 2 ** next_power_of_2(self.max_length)\n for i in self.items:\n i.correct_length(self.max_length)\n self.need_correct_length = False",
"def reconsiderblock(self, block_hash: str) -> None:\n return self.rpc_call(\"reconsiderblock\", block_hash)",
"def resolve_conflict(self):\n neighbours = self.nodes\n new_chain = None\n #We're only looking for chains Longer than ours\n max_length = len(self.chain)\n #Grab and verify the chains from all the other nodes in our netwrok\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n #check if the lentgh is longer and the cain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n #replace our chain if we're discovered a new valid chain, Longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False",
"def sync_length(self, should_grow=True):\n try:\n self._length = (self._end - self._start) % self.maxsize\n except ZeroDivisionError:\n self._length = 0\n\n if self._length == 0 and should_grow:\n self._length = self.maxsize",
"def fix_seq(self, fixed_seq):\n assert len(fixed_seq) == self.length, \\\n \"Length of fixed sequence (%d) does not match length of %s (%d)\" \\\n % (len(fixed_seq), self.full_name, self.length)\n i = 0\n for seq in self.seqs:\n seq.fix_seq( fixed_seq[i:i+seq.length] )\n i += seq.length"
] | [
"0.57378554",
"0.5465236",
"0.5457202",
"0.5445514",
"0.54246277",
"0.5282873",
"0.52699226",
"0.5255896",
"0.5254636",
"0.52333164",
"0.51682156",
"0.5163101",
"0.51473725",
"0.5139157",
"0.5117177",
"0.5074714",
"0.5061125",
"0.50600505",
"0.50330913",
"0.50164944",
"0.5009292",
"0.49961588",
"0.49685708",
"0.4946665",
"0.49454722",
"0.49313006",
"0.4918689",
"0.4906384",
"0.48995733",
"0.4897199"
] | 0.749667 | 0 |
Runs only the lyrics search. | def run_lyrics(self):
if self._GUI:
self._run_lyrics_gui()
else:
self._run_lyrics_nogui() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _run_lyrics_gui(self):\n self._log.info(\"Searching for lyrics\")\n\n self.save_lyrics(find=True)\n Action(\"load\", load=True)\n\n self._log.info(\"Done\")",
"def _run_lyrics_nogui(self):\n self.read_files()\n\n # find lyrics\n self._log_print(msg_GREEN=\"Searching for lyrics\")\n\n self.save_lyrics()\n\n if not self.write_tags():\n self._log_print(msg_WHITE=\"Cannot write tags because there are no \"\n \"coresponding files\")\n else:\n self._log_print(msg_GREEN=\"Done\")",
"def fetch_lyrics(self) -> None:\n if self.artist is None or self.title is None:\n return\n Logger.Logger.log('Looking for song lyrics...')\n finder = LyricsFinder.LyricsFinder(self)\n finder.fetch()\n self.lyrics = finder.get_lyrics()\n self.lyrics_writer = finder.get_lyrics_writer()\n if not self.lyrics:\n Logger.Logger.log('No lyrics found for this song.')",
"async def _search(self, ctx):\n if ctx.invoked_subcommand is None:\n text = open('texts/search.md').read()\n em = discord.Embed(title='Commandes de search TuxBot', description=text, colour=0x89C4F9)\n await self.bot.say(embed=em)",
"async def genius(self, ctx, *args):\n args = argsmachine(args)\n async with ctx.channel.typing():\n if len(args) > 0:\n headers = {'Authorization': 'Bearer ' + token}\n search_url = f'https://api.genius.com/search?q={args}'\n response = requests.get(search_url, headers=headers)\n response = response.json()\n allitems = []\n for item in response['response']['hits']:\n new = item['result']\n newsong = Song(new['full_title'], new['url'], new)\n allitems.append(newsong)\n embed = Embed()\n embed.description = concatenator(allitems)\n await ctx.channel.send('Here are some results of the songs that you wanted. Type in the # of which result you want the lyrics to, or \"no\" to back out!', embed=embed)\n while True:\n try:\n message = await self.bot.wait_for('message', check = check, timeout=30)\n message = message.content.strip()\n if message == 'no':\n break\n else:\n message = int(message)-1\n break\n except asyncio.TimeoutError:\n await ctx.send(\"You didn't reply in time! Enter the #.\")\n continue\n except:\n await ctx.send(f\"Try entering the # again, or enter 'no' to exit the search command.\")\n continue\n\n try:\n chosensong = allitems[message]\n site = requests.get(chosensong.url)\n site = bs4.BeautifulSoup(site.text, features='html.parser')\n chosensong.lyrics = site.find(\"div\", class_=\"lyrics\").get_text()\n \n #Discord supports only 2048 characters in each embed message so this is used to break it up into multiple messages\n messages_needed = math.ceil(len(chosensong.lyrics) / 2048)\n lyricsembed=Embed()\n counter = 1\n currentchar = 0\n nextchar = 2048\n while messages_needed >= counter:\n lyrics = chosensong.lyrics[currentchar:nextchar]\n lyricsembed.description = lyrics\n await ctx.send(f'Here are the lyrics for `{chosensong.title}`, `{counter}`/`{messages_needed}`!', embed=lyricsembed)\n currentchar += 2048\n nextchar += 2048\n counter += 1\n except:\n await ctx.send(f\"Stopping the genius command.\")\n else:\n await ctx.send(f\"Can't really search for lyrics if there are none provided, right? Try again with words, song titles, or artist names.\")",
"async def search(self, ctx: commands.Context, *, query: t.Optional[str]) -> None:\n if query is None:\n # Maybe the user didn't know to pass in a query?\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.description = (\n \"No query passed in. Try passing in something: `$search arabic music`\"\n )\n embed.set_footer(\n text=\"See $help voice for more commands.\", icon_url=Icons.info\n )\n return await ctx.send(embed=embed)\n\n if (results := await self.get_tracks(query, True, False)) is not None:\n # Ensure that we're connected before playing.\n await ctx.invoke(self.connect, channel=None)\n player = self.get_player(ctx.guild)\n if not player.is_connected:\n return\n\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.set_footer(\n text=f\"Showing 5/{len(results)} results.\",\n icon_url=ctx.author.avatar_url,\n )\n embed.description = \"\"\n results = results[:5]\n\n for index, track in enumerate(results, 1):\n m, s = self.get_formatted_length(track.length, True)\n embed.description += (\n f\"**{index}**. [{track.title}]({track.uri}) ({m}:{s})\\n\"\n )\n\n # Get a integer selection using Choice.prompt().\n if (\n choice := await Choices.prompt(\n ctx=ctx, embed=embed, n=5, author_only=True\n )\n ) is None:\n if player.queue.empty:\n await ctx.invoke(self.disconnect)\n return\n\n embed = discord.Embed(\n title=\"Now queued.\" if player.is_playing else \"Now playing.\",\n description=f\"[{results[choice].title}]({results[choice].uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(results[choice].length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if results[choice].thumb is not None:\n embed.set_thumbnail(url=results[choice].thumb)\n await ctx.send(embed=embed)\n\n player.queue.add_tracks(results[choice])\n if not player.is_playing:\n await player.playback()\n else:\n fail = Embeds.status(success=False, desc=\"Failed to find any results.\")\n await ctx.send(embed=fail)",
"def search(self, term):",
"def song_has_lyrics():\n pass",
"def search():\n pass",
"async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")",
"async def search(self, *args, **kwargs):\n pass",
"async def search(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = \"ytsearch:{}\".format(query)\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n msg = \"\"\n for i, x in enumerate(results[\"tracks\"][:10], start=1):\n msg += \"{}. **[{}]({})**\\n\".format(i, x[\"info\"][\"title\"], x[\"info\"][\"uri\"])\n message = await ctx.send(embed=discord.Embed(description=msg).set_footer(text=\"Choose a number to the queue the song | cancel\"))\n def check(m):\n return m.channel == ctx.channel and m.author == ctx.author and (m.content.isdigit() or m.content.lower() == \"cancel\")\n try:\n response = await self.bot.wait_for(\"message\", check=check, timeout=60)\n if response.content.lower() == \"cancel\":\n await response.delete()\n return await message.delete()\n else:\n track = results[\"tracks\"][int(response.content) + 1]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s=discord.Embed()\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await response.delete()\n await message.delete()\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()\n except asyncio.TimeoutError:\n return await ctx.send(\"Timed out :stopwatch:\")",
"def run_it():\n initialize()\n parser = get_parser()\n args = None\n first_parse = True\n while(True):\n if first_parse is True:\n first_parse = False\n args = parser.parse_args()\n \n else:\n # print(textwrap.dedent(\n # '''\\\n # Search again like in the beginning.\n # -- You can either choose best rated or list mode.\n # -- This time, you can insert the search string without double quotes.\n # Remember the list mode options!\n # 0: torrent project.\n # 1: the pirate bay.\n # 2: 1337x.\n # 3: eztv.\n # 4: limetorrents.\n # 5: isohunt.\n # '''))\n sys.exit(0)\n print('Or.. if you want to exit just write \"' +\n Colors.LRED + 'Q' + Colors.ENDC + '\" or \"' +\n Colors.LRED + 'q' + Colors.ENDC + '\".')\n input_parse = input('>> ').replace(\"'\", \"\").replace('\"', '')\n if input_parse in ['Q', 'q']:\n sys.exit(1)\n\n args = parser.parse_args(input_parse.split(' ', 2))\n \n if args.str_search.strip() == \"\":\n print('Please insert an appropiate non-empty string.')\n else:\n args.str_search = args.str_search.replace('_',' ').replace(\"'\",'')\n\n movieName = args.str_search\n #print(args.str_search)\n auto = AutoPy(*insert(args))\n auto.movieName = movieName\n auto.get_content()\n auto.select_torrent()\n auto.download_torrent()",
"def search(self, word):",
"def __quickSearchText(self):\n self.__quickSearchInEditor(False, False)",
"def search(self, query):",
"def search_multiple_words(words):\n # YOUR CODE HERE #\n pass # delete this when you write your code",
"def search(self, *args, **kwargs):",
"def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words",
"def runSearch():\n\tglobal processLanguageOn\n\tdataToFind=getDataFromWidget(podSearchEntry)\n\t#Search through the keys otherwise data changes\n\tdataSource=podListbox.data.keys()\n\t#Store the results of the search\n\tresults=[]\n\t#Search the data source\n\tfor item in dataSource:\n\t\tif searchDataSource(dataToFind,[item],capital=True,full=False):\n\t\t\tresults.append(item)\n\n\t#Add the results to screen\n\tpodListbox.delete(0,END)\n\tfor item in results:\n\t\tpodListbox.addExisting(item)\n\n\tif processLanguageOn:\n\t\tprocessSearchLanguage(podSearchEntry)",
"def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")",
"def search_main() -> None:\n\n logger.info(\"Starting search\")\n links = run_search(grab_search_info())\n if links:\n logger.info(\"Printing links\")\n for key in links:\n print(f\"{key.upper()}: {links[key]}\")",
"def process_search_async(self, call):\n if \"query\" not in call.data:\n _LOGGER.error(\"No text to search\")\n return\n global G_SPOTIFY_FOUND\n G_SPOTIFY_FOUND = []\n search_text = call.data[\"query\"]\n\n self.refresh_spotify_instance()\n\n # Don't true search when token is expired\n if self._oauth.is_token_expired(self._token_info):\n _LOGGER.warning(\"Spotify failed to update, token expired.\")\n return\n\n titles = [ais_global.G_EMPTY_OPTION]\n # artist\n results = self._spotify.search(q='artist:' + search_text, type='artist')\n titles.extend(self.get_list_from_results(results, 'artist'))\n # album\n results = self._spotify.search(q='album:' + search_text, type='album')\n titles.extend(self.get_list_from_results(results, 'album'))\n # playlist\n results = self._spotify.search(q='playlist:' + search_text, type='playlist')\n titles.extend(self.get_list_from_results(results, 'playlist'))\n\n # Update input_select values:\n yield from self.hass.services.async_call(\n 'input_select',\n 'set_options', {\n \"entity_id\": \"input_select.ais_music_track_name\",\n \"options\": titles})\n\n if len(G_SPOTIFY_FOUND) > 0:\n text = \"Znaleziono: %s, włączam pierwszy: %s\" % (\n str(len(G_SPOTIFY_FOUND)), G_SPOTIFY_FOUND[0][\"title\"])\n else:\n text = \"Brak wyników na Spotify dla zapytania %s\" % search_text\n yield from self.hass.services.async_call(\n 'ais_ai_service', 'say_it', {\n \"text\": text\n })\n yield from self.hass.services.async_call(\n 'input_select',\n 'select_option', {\n \"entity_id\": \"input_select.ais_music_track_name\",\n \"option\": G_SPOTIFY_FOUND[0][\"title\"]})",
"async def search(terms):\n\n # Load Settings\n settings = await fetch_settings()\n table = settings[\"table\"]\n\n if \";\" in terms:\n terms.replace(\";\", \"\")\n return \"You can't use ';' in your searches!\"\n terms = terms.split(\" \")\n options = []\n words = []\n results = []\n limit = 5\n searchorder = \"DESC\"\n datebegin = \"0000-00-00\"\n dateend = \"4000-01-01\"\n\n # Separating Options and Search Terms\n for item in terms:\n if \"--\" in item[:2]:\n option = item.replace(\"--\", \"\")\n if option == \"limitall\" or option == \"listall\":\n limit = 10000000\n elif \"limit\" in option:\n try:\n limit = int(option[6:])\n except ValueError:\n limit = 5\n elif \"before\" in option:\n year = datetime.datetime.strptime(option[7:], \"%Y-%m-%d\").year\n # Convert date to format stored table\n if year >= 3300:\n converted_year = str(year - GAME_YEAR_OFFSET) + option[11:]\n dateend = datetime.datetime.strptime(converted_year, \"%Y-%m-%d\")\n else:\n dateend = datetime.datetime.strptime(option[7:], \"%Y-%m-%d\")\n options.append(\"before\")\n elif \"after\" in option:\n year = datetime.datetime.strptime(option[6:], \"%Y-%m-%d\").year\n # Convert date to format stored in table\n if year >= 3300:\n converted_year = str(year - GAME_YEAR_OFFSET) + option[10:]\n datebegin = datetime.datetime.strptime(converted_year, \"%Y-%m-%d\")\n else:\n datebegin = datetime.datetime.strptime(option[6:], \"%Y-%m-%d\")\n options.append(\"after\")\n elif option == \"searchreverse\":\n searchorder = \"ASC\"\n else:\n options.append(option)\n else:\n words.append(item.lower())\n\n # Searching\n connection = await connect()\n if \"before\" in options and \"after\" in options:\n rows = await connection.fetch(f\"\"\"\n SELECT * FROM \"{table}\" \n WHERE \"dateReleased\" BETWEEN $1 AND $2\n ORDER BY \"dateReleased\" {searchorder};\n \"\"\", datebegin, dateend)\n elif \"before\" in options:\n rows = await connection.fetch(f\"\"\"\n SELECT * FROM \"{table}\" \n WHERE \"dateReleased\" < $1\n ORDER BY \"dateReleased\" {searchorder};\n \"\"\", dateend)\n elif \"after\" in options:\n rows = await connection.fetch(f\"\"\"\n SELECT * FROM \"{table}\" \n WHERE \"dateReleased\" > $1\n ORDER BY \"dateReleased\" {searchorder};\n \"\"\", datebegin)\n else:\n rows = await connection.fetch(f\"\"\"\n SELECT * FROM \"{table}\" ORDER BY \"dateReleased\" {searchorder};\n \"\"\")\n await connection.close()\n if \"searchall\" in options:\n for row in rows:\n for word in words:\n if word in row[\"Title\"].lower():\n results.append(row)\n if word in row[\"Text\"].lower():\n if row in results:\n pass\n else:\n results.append(row)\n elif \"content\" in options:\n for row in rows:\n for word in words:\n if word in row[\"Text\"].lower():\n results.append(row)\n else:\n for row in rows:\n for word in words:\n if word in row[\"Title\"].lower():\n results.append(row)\n return results[:limit], len(results)",
"def start_search(self):\n self._raise_not_supported()",
"def search(self, query):\n logger.debug('Performing search for: '+query)\n write_textfield('queryString', query+\"\\n\", check=False)\n self.waitForLoaderToDisappear()",
"def search(self, *args, **kwargs): # real signature unknown\n pass",
"def main():\n\tdescription = \"Utility to search for spotify by song, artist or song ID and to create playlists based off of song ID's\"\n\tusage = \"search.py [-h] [-s SONG | -a ARTIST | -i ID] [-p PLAYLIST & -u USERNAME & -i ID & -d DESCRIPTION]\"\n\tparser = argparse.ArgumentParser(description=description, usage=usage)\n\tgroup = parser.add_mutually_exclusive_group()\n\tgroup.add_argument(\"-s\", \"--song\", nargs=1, required='--argument' in sys.argv, help=\"Search for a song by name\")\n\tgroup.add_argument(\"-a\", \"--artist\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t help=\"Search for songs from an Artist\\n\")\n\tgroup.add_argument(\"-i\", \"--id\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t help=\"Search for song based on ID or create playlist based off of song ID\")\n\tparser.add_argument(\"-p\", \"--playlist\", nargs=1, required='--id' in sys.argv,\n\t\t\t\t\t\thelp=\"Name of the playlist to be created. MUST be used with -i/--id\")\n\tparser.add_argument(\"-d\", \"--description\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t\thelp=\"Playlist Description. Must be used with -p,-i and -u\")\n\tparser.add_argument(\"-u\", \"--username\", nargs=1, required='--argumnet' in sys.argv,\n\t\t\t\t\t\thelp=\"Spotify Username. Must be used with -p, -i and -d\")\n\targs = parser.parse_args()\n\t# print(args)\n\n\tsolr = Solr_Query()\n\n\tresponse = None\n\n\tif args.song:\n\t\tprint(\"Searching for song:\", args.song[0].strip())\n\t\tsong_name = args.song[0].strip()\n\t\tsolr.set_search_type(\"songs\")\n\t\tquery = solr.set_query(song_name)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\tif args.artist:\n\t\tprint(\"Searching for songs by artist: \", args.artist[0].strip())\n\t\tartist = args.artist[0].strip()\n\t\tsolr.set_search_type(\"artists\")\n\t\tquery = solr.set_query(artist)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\t# Still trying to figure this one out. The getmorelike this funcionality is harder than we thought\n\tif args.playlist and args.id and args.description and args.username:\n\t\tprint(\"Creating a playlist based off of song ID:\", args.id[0].strip())\n\t\tid = args.id[0].strip()\n\t\tdescription = args.description[0].strip()\n\t\tplaylist = args.playlist[0].strip()\n\t\tusername = args.username[0].strip()\n\n\t\tsolr.set_search_type(\"id\")\n\t\tquery = solr.set_query(id)\n\t\tresponse = solr.exec_query(query)\n\n\t\t# Create a playlist create object to find similar songs and create the playlist\n\t\tcreator = Playlist_Create(username, playlist, description)\n\t\tcreator.authenticate() # authenticate using the username passed in\n\t\tresponse = creator.get_similar_songs(response)\n\t\tsongs = creator.get_song_ids(response)\n\t\tplaylist_id = creator.create_playlist()\n\t\tcreator.add_songs(playlist_id, songs)\n\n\n\n\telif args.playlist and not args.id:\n\t\tparser.error(\"Must input a song ID to create a playlist with!\")\n\telif args.playlist and not args.description:\n\t\tparser.error(\"Must input a playlist description\")\n\telif args.playlist and not args.username:\n\t\tparser.error(\"Need your username to create the playlist\")\n\n\tif args.id:\n\t\tprint(\"Searching for song with ID:\", args.id[0].strip())\n\t\tid = args.id[0].strip()\n\t\tsolr.set_search_type(\"id\")\n\t\tquery = solr.set_query(id)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\tprint(\"\\nDone!\")",
"def main(self):\n\n args = self.parser.parse_args()\n # Little magic/abomination to avoid a if-else\n # funct == search_all or search_any\n funct = getattr(self, \"search_\" + args.search_funct) \n cache_file_path = self.get_cache_file_path()\n\n # If cache is missing or user asked to force an update\n # then scrape the wiki\n if not os.path.isfile( cache_file_path ) or args.update:\n self.data_values = WikiTableScraper().scrapeTables()\n pickle.dump( self.data_values, open( cache_file_path, \"wb\" ) )\n # else load the minecraft objects from cache\n else:\n self.data_values = pickle.load( open( cache_file_path, \"rb\" ) )\n\n # call the search function (will return list of ids)\n for k in funct(args.words):\n print k.ljust(4), # Don't print a newline\n # Don't print the name if user requested only id's\n if not args.only_id:\n print self.data_values[k],\n print # print the newline now",
"def runNewSearch(self):\n self.__searchJob = self.__startSearch()\n\n self.monitorSearchJob()"
] | [
"0.7195334",
"0.6765707",
"0.625291",
"0.5987377",
"0.595524",
"0.5927061",
"0.5899226",
"0.5836227",
"0.5817641",
"0.5816498",
"0.579893",
"0.574689",
"0.57248443",
"0.56937945",
"0.56414646",
"0.56008506",
"0.5592254",
"0.55891544",
"0.55754983",
"0.5552696",
"0.5535671",
"0.5514977",
"0.5491328",
"0.5450999",
"0.5446871",
"0.5438636",
"0.54382294",
"0.5434539",
"0.54233277",
"0.5423005"
] | 0.67778045 | 1 |
Runs only lyrics search with specifics of the GUI mode. | def _run_lyrics_gui(self):
self._log.info("Searching for lyrics")
self.save_lyrics(find=True)
Action("load", load=True)
self._log.info("Done") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_lyrics(self):\n if self._GUI:\n self._run_lyrics_gui()\n else:\n self._run_lyrics_nogui()",
"def _run_lyrics_nogui(self):\n self.read_files()\n\n # find lyrics\n self._log_print(msg_GREEN=\"Searching for lyrics\")\n\n self.save_lyrics()\n\n if not self.write_tags():\n self._log_print(msg_WHITE=\"Cannot write tags because there are no \"\n \"coresponding files\")\n else:\n self._log_print(msg_GREEN=\"Done\")",
"def load_search_gui(self):\n pass",
"def __quickSearchText(self):\n self.__quickSearchInEditor(False, False)",
"def run_it():\n initialize()\n parser = get_parser()\n args = None\n first_parse = True\n while(True):\n if first_parse is True:\n first_parse = False\n args = parser.parse_args()\n \n else:\n # print(textwrap.dedent(\n # '''\\\n # Search again like in the beginning.\n # -- You can either choose best rated or list mode.\n # -- This time, you can insert the search string without double quotes.\n # Remember the list mode options!\n # 0: torrent project.\n # 1: the pirate bay.\n # 2: 1337x.\n # 3: eztv.\n # 4: limetorrents.\n # 5: isohunt.\n # '''))\n sys.exit(0)\n print('Or.. if you want to exit just write \"' +\n Colors.LRED + 'Q' + Colors.ENDC + '\" or \"' +\n Colors.LRED + 'q' + Colors.ENDC + '\".')\n input_parse = input('>> ').replace(\"'\", \"\").replace('\"', '')\n if input_parse in ['Q', 'q']:\n sys.exit(1)\n\n args = parser.parse_args(input_parse.split(' ', 2))\n \n if args.str_search.strip() == \"\":\n print('Please insert an appropiate non-empty string.')\n else:\n args.str_search = args.str_search.replace('_',' ').replace(\"'\",'')\n\n movieName = args.str_search\n #print(args.str_search)\n auto = AutoPy(*insert(args))\n auto.movieName = movieName\n auto.get_content()\n auto.select_torrent()\n auto.download_torrent()",
"def on_regex_search_toggle(self, event):\r\n\r\n if self.m_regex_search_checkbox.GetValue():\r\n update_autocomplete(self.m_searchfor_textbox, \"regex_search\")\r\n else:\r\n update_autocomplete(self.m_searchfor_textbox, \"literal_search\")\r\n event.Skip()",
"def on_load_search(self, event):\r\n\r\n dlg = LoadSearchDialog(self)\r\n dlg.ShowModal()\r\n search, is_regex = dlg.get_search()\r\n dlg.Destroy()\r\n if search is not None and is_regex is not None:\r\n self.m_searchfor_textbox.SetValue(search)\r\n self.m_regex_search_checkbox.SetValue(regex_search)",
"def runSearch():\n\tglobal processLanguageOn\n\tdataToFind=getDataFromWidget(podSearchEntry)\n\t#Search through the keys otherwise data changes\n\tdataSource=podListbox.data.keys()\n\t#Store the results of the search\n\tresults=[]\n\t#Search the data source\n\tfor item in dataSource:\n\t\tif searchDataSource(dataToFind,[item],capital=True,full=False):\n\t\t\tresults.append(item)\n\n\t#Add the results to screen\n\tpodListbox.delete(0,END)\n\tfor item in results:\n\t\tpodListbox.addExisting(item)\n\n\tif processLanguageOn:\n\t\tprocessSearchLanguage(podSearchEntry)",
"def __find(self):\n txt = self.textCursor().selectedText()\n self.__mainWindow.showFind(txt)",
"def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()",
"def on_test_regex(self, event):\r\n\r\n self.m_regex_test_button.Enable(False)\r\n self.tester = RegexTestDialog(\r\n self,\r\n self.m_case_checkbox.GetValue(),\r\n self.m_dotmatch_checkbox.GetValue(),\r\n self.m_searchfor_textbox.GetValue()\r\n )\r\n self.tester.Show()",
"def new_search(self, widget, data=None):\n self.artist_name.set_text(\"\")\n self.song_name.set_text(\"\")\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n self.window.resize(self.width, self.height)",
"def on_findtextCombo_editTextChanged(self, text):\n self.__enableFindButton()",
"def search_command():\n listing.delete(0, END)\n for row in backend.search(title_text.get(), \n author_text.get(), \n year_text.get(), \n isbn_text.get()):\n listing.insert(END, row)",
"def searchText(self):\n tabId = self.tab.currentIndex()\n if tabId == -1: return False\n currentDoc = self.tab.widget(tabId)\n \n if isinstance(currentDoc, WelcomePage):\n return\n \n if currentDoc.extension in [ TestUnit.TYPE, TestSuite.TYPE, TestAdapter.TYPE,\n TestData.TYPE, TestLibrary.TYPE, TestTxt.TYPE ]:\n selectedText = ''\n if currentDoc.editor().hasSelectedText():\n selectedText = currentDoc.editor().selectedText()\n # self.hideFindReplaceAction.setChecked(True)\n self.findWidget.showEnhanced(textSelected=selectedText)",
"def text_to_find_changed(self, _):\n self.ui.findButton.setEnabled(self.ui.textToFind.size() > 0)",
"def on_filterEdit_textEdited(self, text):\n self.__enableFindButton()",
"def filter(self):\n self.getcmd()\n self.get_status()\n self.select()\n if self.params['mode'] == 'greedy':\n self.greedy()\n \n return",
"def song_has_lyrics():\n pass",
"def main():\n gui = GUI()\n engine = SearchEngine()\n \n while True:\n event, values = gui.window.Read()\n if event is sg.WIN_CLOSED:\n break\n \n if event == \"-SEARCH-\":\n search_time = time()\n print(\">> Loading file index.\")\n if engine.load_existing_index(values[\"PATH\"]):\n # Check whether the modified time of directory matches the\n # indexed modified time. If not then ask to create to a new\n # file index.\n if engine.modified_time != os.path.getmtime(values[\"PATH\"]):\n confirm = sg.popup_ok_cancel(\n \"The folder appears to be modified. \"\n \"Create new index before searching??\"\n )\n if confirm == \"OK\":\n recreate_time = time()\n engine.create_new_index(values[\"PATH\"])\n print(\">> New file index created. \"\n \"[{:.3f}s]\".format(time() - recreate_time))\n else:\n print(\">> File index not present. Creating new file index\")\n index_time = time()\n try:\n engine.create_new_index(values[\"PATH\"])\n except FileNotFoundError:\n print(\">> Enter a valid directory\")\n continue\n else:\n print(\">> New file index created. \"\n \"[{:.3f}]\".format(time() - index_time))\n engine.search(values)\n print(\">> Searched {} records. \"\n \"[{:.3f}s]\".format(engine.records, time() - search_time))\n gui.window[\"-RESULTS-\"].Update(values=engine.results)\n Thread(target=notify).start()\n print(\">> Files found {}\".format(len(engine.results)))\n \n # Set the FolderBrowser location to the current location.\n gui.window.FindElement(\"-FOLDER-\").InitialFolder = values[\"PATH\"]\n elif event == \"-CLEAR-\":\n clear_time = time()\n engine.clear_indexes()\n print(\">> Cleared all file indexes. \"\n \"[{:.3f}]\".format(time() - clear_time))\n elif event == \"-RESULTS-dc-\":\n try:\n file, verb, target = values[\"-RESULTS-\"][0], \"Opening\", \"file\"\n except IndexError:\n continue\n \n action = gui.file_popup(file)\n if not action:\n continue\n \n command = \"xdg-open\"\n if action == \"-EXPLORER-\":\n file = file.rsplit(\"/\", 1)[0]\n target = \"folder\"\n elif action == \"-DEl-\":\n command = \"rm -f\"\n verb = \"Deleting\"\n engine.results.remove(file)\n print(f\">> {verb} {target} for {file}.\")\n Thread(target=os.system,\n args=(f\"{command} '{file}'\",)).start()\n if action == \"-DEl-\":\n gui.window[\"-RESULTS-\"].Update(values=engine.results)\n new_index_time = time()\n engine.create_new_index(values[\"PATH\"])\n print(\">> New file index created for directory. \"\n f\"[{time() - new_index_time:.3f}s]\")\n elif event in (\"-APP-\", \"-EXPLORER-\"):\n if not values[\"-RESULTS-\"]:\n continue\n file = values[\"-RESULTS-\"][0]\n target = \"file\"\n if event == \"-EXPLORER-\":\n file, target = file.rsplit(\"/\", 1)[0], \"folder\"\n print(f\">> Opening {target} for {file}.\")\n Thread(target=os.system, args=(f\"xdg-open '{file}'\",)).start()\n elif event == \"-DEl-\":\n if not values[\"-RESULTS-\"]:\n continue\n confirm = sg.popup_yes_no(\"Are u sure to delete \"\n f\"{len(values['-RESULTS-'])} files???\")\n if not confirm == \"Yes\":\n continue\n del_time = time()\n for file in values[\"-RESULTS-\"]:\n os.remove(file)\n print(f\">> Deleted {file}.\")\n engine.results = [file for file in engine.results\n if file not in values[\"-RESULTS-\"]]\n gui.window[\"-RESULTS-\"].Update(values=engine.results)\n print(f\">> Deleted {len(values['-RESULTS-'])} files. \"\n f\"[{time() - del_time:.3f}s]\")\n new_index_time = time()\n engine.create_new_index(values[\"PATH\"])\n print(\">> New file index created for directory. 
\"\n f\"[{time() - new_index_time:.3f}s]\")\n print(\"*\" * 100)\n \n gui.window.close(), exit()",
"def showFind(self, txt=\"\"):\n self.__searchWidget.showFind(txt)",
"async def _search(self, ctx):\n if ctx.invoked_subcommand is None:\n text = open('texts/search.md').read()\n em = discord.Embed(title='Commandes de search TuxBot', description=text, colour=0x89C4F9)\n await self.bot.say(embed=em)",
"def pre_search(self):\n self.update_status(\"Edit pattern filter\")\n self.patternEditor.show()",
"def __search(self):\n self.resultList.clear()\n self.infoLabel.clear()\n \n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.searchButton.setEnabled(False)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__canceled = False\n \n self.__query = [term for term in self.searchEdit.text().strip().split()\n if term not in PipSearchDialog.Stopwords]\n self.__client.call(\n \"search\",\n ({\"name\": self.__query, \"summary\": self.__query}, \"or\"),\n self.__processSearchResult,\n self.__searchError\n )",
"def handle_text_search(self, text):\n log.debug(\"Handling text search: %s\", text)\n\n self.current_selected = 0\n self._refresh()",
"def __doSearch(self):\n if (\n self.__replaceMode and\n not e5App().getObject(\"ViewManager\").checkAllDirty()\n ):\n return\n \n self.__cancelSearch = False\n \n if self.filterCheckBox.isChecked():\n fileFilter = self.filterEdit.text()\n fileFilterList = [\n \"^{0}$\".format(filter.replace(\".\", r\"\\.\").replace(\"*\", \".*\"))\n for filter in fileFilter.split(\";\")\n ]\n filterRe = re.compile(\"|\".join(fileFilterList))\n \n if self.projectButton.isChecked():\n if self.filterCheckBox.isChecked():\n files = [self.project.getRelativePath(file)\n for file in\n self.__getFileList(\n self.project.getProjectPath(), filterRe)]\n else:\n files = []\n if self.sourcesCheckBox.isChecked():\n files += self.project.pdata[\"SOURCES\"]\n if self.formsCheckBox.isChecked():\n files += self.project.pdata[\"FORMS\"]\n if self.interfacesCheckBox.isChecked():\n files += self.project.pdata[\"INTERFACES\"]\n if self.protocolsCheckBox.isChecked():\n files += self.project.pdata[\"PROTOCOLS\"]\n if self.resourcesCheckBox.isChecked():\n files += self.project.pdata[\"RESOURCES\"]\n elif self.dirButton.isChecked():\n if not self.filterCheckBox.isChecked():\n filters = []\n if self.sourcesCheckBox.isChecked():\n filters.extend(\n [\"^{0}$\".format(\n assoc.replace(\".\", r\"\\.\").replace(\"*\", \".*\"))\n for assoc in list(\n Preferences.getEditorLexerAssocs().keys())\n if assoc not in self.formsExt + self.interfacesExt +\n self.protocolsExt])\n if self.formsCheckBox.isChecked():\n filters.append(self.filterForms)\n if self.interfacesCheckBox.isChecked():\n filters.append(self.filterInterfaces)\n if self.protocolsCheckBox.isChecked():\n filters.append(self.filterProtocols)\n if self.resourcesCheckBox.isChecked():\n filters.append(self.filterResources)\n filterString = \"|\".join(filters)\n filterRe = re.compile(filterString)\n files = self.__getFileList(\n os.path.abspath(self.dirPicker.currentText()),\n filterRe)\n elif self.openFilesButton.isChecked():\n vm = e5App().getObject(\"ViewManager\")\n vm.checkAllDirty()\n files = vm.getOpenFilenames()\n \n self.findList.clear()\n QApplication.processEvents()\n QApplication.processEvents()\n self.findProgress.setMaximum(len(files))\n \n # retrieve the values\n reg = self.regexpCheckBox.isChecked()\n wo = self.wordCheckBox.isChecked()\n cs = self.caseCheckBox.isChecked()\n ct = self.findtextCombo.currentText()\n if reg:\n txt = ct\n else:\n txt = re.escape(ct)\n if wo:\n txt = \"\\\\b{0}\\\\b\".format(txt)\n flags = re.UNICODE\n if not cs:\n flags |= re.IGNORECASE\n try:\n search = re.compile(txt, flags)\n except re.error as why:\n E5MessageBox.critical(\n self,\n self.tr(\"Invalid search expression\"),\n self.tr(\"\"\"<p>The search expression is not valid.</p>\"\"\"\n \"\"\"<p>Error: {0}</p>\"\"\").format(str(why)))\n self.stopButton.setEnabled(False)\n self.findButton.setEnabled(True)\n self.findButton.setDefault(True)\n return\n # reset the findtextCombo\n if ct in self.searchHistory:\n self.searchHistory.remove(ct)\n self.searchHistory.insert(0, ct)\n self.findtextCombo.clear()\n self.findtextCombo.addItems(self.searchHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/SearchHistory\",\n self.searchHistory[:30])\n \n if self.__replaceMode:\n replTxt = self.replacetextCombo.currentText()\n if replTxt in self.replaceHistory:\n self.replaceHistory.remove(replTxt)\n self.replaceHistory.insert(0, replTxt)\n self.replacetextCombo.clear()\n self.replacetextCombo.addItems(self.replaceHistory)\n Preferences.Prefs.settings.setValue(\n 
\"FindFileDialog/ReplaceHistory\",\n self.replaceHistory[:30])\n \n if self.dirButton.isChecked():\n searchDir = self.dirPicker.currentText()\n if searchDir in self.dirHistory:\n self.dirHistory.remove(searchDir)\n self.dirHistory.insert(0, searchDir)\n self.dirPicker.clear()\n self.dirPicker.addItems(self.dirHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/DirectoryHistory\",\n self.dirHistory[:30])\n \n # set the button states\n self.stopButton.setEnabled(True)\n self.stopButton.setDefault(True)\n self.findButton.setEnabled(False)\n \n # now go through all the files\n self.__populating = True\n self.findList.setUpdatesEnabled(False)\n progress = 0\n breakSearch = False\n occurrences = 0\n fileOccurrences = 0\n for file in files:\n self.__lastFileItem = None\n found = False\n if self.__cancelSearch or breakSearch:\n break\n \n self.findProgressLabel.setPath(file)\n \n if self.projectButton.isChecked():\n fn = os.path.join(self.project.ppath, file)\n else:\n fn = file\n # read the file and split it into textlines\n try:\n text, encoding, hashStr = Utilities.readEncodedFileWithHash(fn)\n lines = text.splitlines(True)\n except (UnicodeError, IOError):\n progress += 1\n self.findProgress.setValue(progress)\n continue\n \n # now perform the search and display the lines found\n count = 0\n for line in lines:\n if self.__cancelSearch:\n break\n \n count += 1\n contains = search.search(line)\n if contains:\n occurrences += 1\n found = True\n start = contains.start()\n end = contains.end()\n if self.__replaceMode:\n rline = search.sub(replTxt, line)\n else:\n rline = \"\"\n line = self.__stripEol(line)\n if len(line) > 1024:\n line = \"{0} ...\".format(line[:1024])\n if self.__replaceMode:\n if len(rline) > 1024:\n rline = \"{0} ...\".format(line[:1024])\n line = \"- {0}\\n+ {1}\".format(\n line, self.__stripEol(rline))\n self.__createItem(file, count, line, start, end,\n rline, hashStr)\n \n if self.feelLikeCheckBox.isChecked():\n fn = os.path.join(self.project.ppath, file)\n self.sourceFile.emit(fn, count, \"\", start, end)\n QApplication.processEvents()\n breakSearch = True\n break\n \n QApplication.processEvents()\n \n if found:\n fileOccurrences += 1\n progress += 1\n self.findProgress.setValue(progress)\n \n if not files:\n self.findProgress.setMaximum(1)\n self.findProgress.setValue(1)\n \n resultFormat = self.tr(\"{0} / {1}\", \"occurrences / files\")\n self.findProgressLabel.setPath(resultFormat.format(\n self.tr(\"%n occurrence(s)\", \"\", occurrences),\n self.tr(\"%n file(s)\", \"\", fileOccurrences)))\n \n self.findList.setUpdatesEnabled(True)\n self.findList.sortItems(self.findList.sortColumn(),\n self.findList.header().sortIndicatorOrder())\n self.findList.resizeColumnToContents(1)\n if self.__replaceMode:\n self.findList.header().resizeSection(0, self.__section0Size + 30)\n self.findList.header().setStretchLastSection(True)\n self.__populating = False\n \n self.stopButton.setEnabled(False)\n self.findButton.setEnabled(True)\n self.findButton.setDefault(True)\n \n if breakSearch:\n self.close()",
"def on_searchEdit_textChanged(self, txt):\n self.searchButton.setEnabled(bool(txt))",
"def search(self):\n timed_print(\"Searching\", randint(3,7))\n if len([i for i in self.notes if not i.hidden]) == 0:\n return \"Nothing here\"\n else:\n for i in self.notes:\n screen_clear()\n i.show()\n print()\n input(\"Press Enter to continue searching\")\n return \"Nothing else here!\"",
"def _search(progtext, qs=None, splash=True, pre_load=True):\n g.message = \"Searching for '%s%s%s'\" % (c.y, progtext, c.w)\n\n # show splash screen during fetch\n if splash:\n g.content = logo(c.b) + \"\\n\\n\"\n screen_update()\n\n # perform fetch\n wdata = call_gdata('search', qs)\n songs = get_tracks_from_json(wdata)\n\n if songs and pre_load:\n # preload first result url\n kwa = {\"song\": songs[0], \"delay\": 0}\n t = threading.Thread(target=preload, kwargs=kwa)\n t.start()\n\n if songs:\n g.model.songs = songs\n return True\n\n return False",
"def textManip(*args, visible: bool=True, q=True, query=True, **kwargs)->Union[None, Any]:\n pass"
] | [
"0.7300415",
"0.66114295",
"0.60927004",
"0.5863751",
"0.57411414",
"0.5515903",
"0.54406303",
"0.54393154",
"0.54014164",
"0.53518796",
"0.53172654",
"0.5292283",
"0.52921987",
"0.52903503",
"0.52819675",
"0.52603555",
"0.5241229",
"0.5237763",
"0.5199213",
"0.5194571",
"0.51670116",
"0.5155587",
"0.51532847",
"0.51449305",
"0.5141894",
"0.5136687",
"0.5136608",
"0.51206696",
"0.51105964",
"0.50975513"
] | 0.7690281 | 0 |
Performs check of Batch Job Definition container properties. | def _validate_container_properties(container_properties, prefix=None):
if not prefix:
prefix = 'container_properties'
container_config = [
{
'field_name': 'image',
'field_value': container_properties.get('image'),
'prefix': prefix,
'required_type': str,
'validators': [
_validate_field_type
]
},
{
'field_name': 'vcpus',
'field_value': container_properties.get('vcpus'),
'prefix': prefix,
'required_type': int,
'validators': [
_validate_field_type
]
},
{
'field_name': 'memory',
'field_value': container_properties.get('memory'),
'prefix': prefix,
'required_type': int,
'validators': [
_validate_field_type
]
},
{
'field_name': 'command',
'field_value': container_properties.get('command'),
'prefix': prefix,
'required_type': list,
'validators': [
_validate_field_type
]
},
{
'field_name': 'job_role_arn',
'field_value': container_properties.get('job_role_arn'),
'prefix': prefix,
'required_type': str,
'validators': [
_validate_field_type
]
},
{
'field_name': 'execution_role_arn',
'field_value': container_properties.get('execution_role_arn'),
'prefix': prefix,
'required_type': str,
'validators': [
_validate_field_type
]
},
{
'field_name': 'volumes',
'field_value': container_properties.get('volumes'),
'prefix': prefix,
'required_type': list,
'validators': [
_validate_field_type
]
},
{
'field_name': 'environment',
'field_value': container_properties.get('environment'),
'prefix': prefix,
'required_type': list,
'validators': [
_validate_field_type
]
},
{
'field_name': 'mount_points',
'field_value': container_properties.get('mount_points'),
'prefix': prefix,
'required_type': list,
'validators': [
_validate_field_type
]
},
{
'field_name': 'readonly_root_filesystem',
'field_value': container_properties.get('readonly_root_filesystem'),
'prefix': prefix,
'required_type': bool,
'validators': [
_validate_field_type
]
},
{
'field_name': 'privileged',
'field_value': container_properties.get('privileged'),
'prefix': prefix,
'required_type': bool,
'validators': [
_validate_field_type
]
},
{
'field_name': 'ulimits',
'field_value': container_properties.get('ulimits'),
'prefix': prefix,
'required_type': list,
'validators': [
_validate_field_type
]
},
{
'field_name': 'user',
'field_value': container_properties.get('user'),
'prefix': prefix,
'required_type': str,
'validators': [
_validate_field_type
]
},
{
'field_name': 'instance_type',
'field_value': container_properties.get('instance_type'),
'prefix': prefix,
'required_type': str,
'validators': [
_validate_field_type
]
},
{
'field_name': 'resource_requirements',
'field_value': container_properties.get('resource_requirements'),
'prefix': prefix,
'required_type': dict,
'validators': [
_validate_field_type
]
},
{
'field_name': 'linux_parameters',
'field_value': container_properties.get('linux_parameters'),
'prefix': prefix,
'required_type': dict,
'validators': [
_validate_field_type
]
},
{
'field_name': 'log_configuration',
'field_value': container_properties.get('log_configuration'),
'prefix': prefix,
'required_type': dict,
'validators': [
_validate_field_type
]
},
{
'field_name': 'secrets',
'field_value': container_properties.get('secrets'),
'prefix': prefix,
'required_type': list,
'validators': [
_validate_field_type
]
},
{
'field_name': 'network_configuration',
'field_value': container_properties.get('network_configuration'),
'prefix': prefix,
'required_type': dict,
'validators': [
_validate_field_type
]
},
{
'field_name': 'fargate_platform_configuration',
'field_value': container_properties.get('fargate_platform_configuration'),
'prefix': prefix,
'required_type': dict,
'validators': [
_validate_field_type
]
},
]
_process_config(container_config) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_batch_jobdef(jobdef_name, jobdef_meta):\n jobdef_config = [\n {\n 'field_name': 'job_definition_name',\n 'field_value': jobdef_name,\n 'prefix': '',\n 'validators': [\n _validate_required_field\n ]\n },\n {\n 'field_name': 'job_definition_type',\n 'field_value': jobdef_meta.get('job_definition_type'),\n 'field_options': JOB_DEFINITION_TYPES,\n 'prefix': '',\n 'required': True,\n 'validators': [\n _validate_options_field\n ]\n },\n {\n 'field_name': 'parameters',\n 'field_value': jobdef_meta.get('parameters'),\n 'prefix': '',\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'retry_strategy',\n 'field_value': jobdef_meta.get('retry_strategy'),\n 'prefix': '',\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'propagate_tags',\n 'field_value': jobdef_meta.get('propagate_tags'),\n 'prefix': '',\n 'required_type': bool,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'timeout',\n 'field_value': jobdef_meta.get('timeout'),\n 'prefix': '',\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'tags',\n 'field_value': jobdef_meta.get('tags'),\n 'prefix': '',\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'platform_capabilities',\n 'field_value': jobdef_meta.get('platform_capabilities'),\n 'prefix': '',\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n }\n ]\n\n job_definition_type = jobdef_meta.get('job_definition_type')\n container_properties = jobdef_meta.get('container_properties')\n node_properties = jobdef_meta.get('node_properties')\n\n _process_config(jobdef_config)\n\n if job_definition_type == 'container':\n if not container_properties and not node_properties:\n raise AssertionError(\n \"Either 'container_properties' or 'node_properties' must be specified \"\n \"for 'container' job definition type.\"\n )\n if container_properties:\n _validate_container_properties(container_properties)\n if node_properties:\n _validate_node_properties(node_properties)",
"def _check_analyzed_job(self, job, container):\n self.log(u\"Checking the Job object generated from container\")\n\n self.log(u\"Checking that the Job is not None\")\n if job is None:\n self._failed(u\"Unable to create a Job from the container.\")\n return\n\n self.log(u\"Checking that the Job has at least one Task\")\n if len(job) == 0:\n self._failed(u\"Unable to create at least one Task from the container.\")\n return\n\n if self.rconf[RuntimeConfiguration.JOB_MAX_TASKS] > 0:\n self.log(u\"Checking that the Job does not have too many Tasks\")\n if len(job) > self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]:\n self._failed(u\"The Job has %d Tasks, more than the maximum allowed (%d).\" % (\n len(job),\n self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]\n ))\n return\n\n self.log(u\"Checking that each Task text file is well formed\")\n for task in job.tasks:\n self.log([u\"Checking Task text file '%s'\", task.text_file_path])\n text_file_bstring = container.read_entry(task.text_file_path)\n if (text_file_bstring is None) or (len(text_file_bstring) == 0):\n self._failed(u\"Text file '%s' is empty\" % task.text_file_path)\n return\n self._check_utf8_encoding(text_file_bstring)\n if not self.result.passed:\n self._failed(u\"Text file '%s' is not encoded in UTF-8\" % task.text_file_path)\n return\n self._check_not_empty(text_file_bstring)\n if not self.result.passed:\n self._failed(u\"Text file '%s' is empty\" % task.text_file_path)\n return\n self.log([u\"Checking Task text file '%s': passed\", task.text_file_path])\n self.log(u\"Checking each Task text file is well formed: passed\")",
"def check(self):\r\n self._check_object(self._config.name)",
"def service_mesh_job_check(container_name):\n complete = False\n log.info(\"Checking if %s is complete\", container_name)\n try:\n response = coreV1Api.list_namespaced_pod(namespace=namespace, watch=False)\n for item in response.items:\n # container_statuses can be None, which is non-iterable.\n if item.status.container_statuses is None:\n continue\n for container in item.status.container_statuses:\n if container.name == container_name and item.status.phase == \"Running\":\n name = read_name(item)\n log.info(\"Container Details %s \", container)\n log.info(\"Container Status %s \", container.state.terminated)\n\n if container.state.terminated:\n log.info(\"Container Terminated with reason %s \", container.state.terminated.reason)\n complete = True\n\n except ApiException as exc:\n log.error(\"Exception when calling read_namespaced_job_status: %s\\n\",\n exc)\n return complete",
"def lock_JobProperties(self):\n for j in self.__dict__.keys():\n j_obj=self.__dict__.get(j)\n if hasattr(j_obj,'lock_JobProperties'):\n j_obj.lock_JobProperties()\n j_obj._locked=True\n elif hasattr(j_obj,'_locked'):\n j_obj._locked=True\n self._log.info('The JobProperty Container %s is locked',\n self.__name__)",
"def check_job_exists( job_list, analysis_group_id, reprocess_config_id):\n for job in job_list:\n struct = JSONMessage.unserialize(job.input_message)\n\n if( int( struct.analysis_group_id ) == int( analysis_group_id ) and \\\n int( struct.reprocess_config_id ) == int( reprocess_config_id ) ):\n return 1\n return 0",
"def testJobProperties(databases):\n\n def checkProperties(config):\n jobId = 'job0'\n assert config.targets == {'target1', 'target2'}\n assert config.getId() == jobId\n assert config['name'] == jobId\n assert config.owner == gen.owner\n assert config['owner'] == gen.owner\n assert config.comment == gen.comment\n #assert config.getDescription() == config['description']\n\n gen = DataGenerator(databases)\n config = gen.createConfiguration(\n targets=('target1', 'target2')\n )\n runWithReload(databases, config, checkProperties)",
"def test_validate_properties(self):\n\n ingest_mgmr = IngestManager()\n ingest_mgmr.validate_config_file(self.example_config_data)\n ingest_mgmr.validate_properties()\n assert (ingest_mgmr.collection.name == 'my_col_1')\n assert (ingest_mgmr.experiment.name == 'my_exp_1')\n assert (ingest_mgmr.channel.name == 'my_ch_1')",
"def _validate(self):\n for name, prop in self._properties.iteritems():\n value = getattr(self, name, None)\n prop._do_validate(value)",
"def test_successful_configuration(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})",
"def job_lang_check(lang):\n lang = lang\n def job_check(form, field):\n \"\"\"\n this is to check if job properties are well edited:\n job title and content should not be empty if it is published\n\n Arguments:\n - `from`:\n - `field`:\n \"\"\"\n data = field.data\n published = getattr(form, 'publish_'+lang)\n if published.data:\n if len(data) == 0:\n raise validators.ValidationError('field should not be empty if you choose to publish it')\n return job_check",
"def has_pending_jobs(instance_properties, max_size):\n try:\n max_cluster_slots = max_size * instance_properties.get(\"slots\")\n pending_jobs = get_pending_jobs_info(max_slots_filter=max_cluster_slots, skip_if_state=SGE_HOLD_STATE)\n logging.info(\"Found the following pending jobs:\\n%s\", pending_jobs)\n return len(pending_jobs) > 0, False\n except Exception as e:\n log.error(\"Failed when checking for pending jobs with exception %s. Reporting no pending jobs.\", e)\n return False, True",
"def check_status(job):\n client = get_dropbox_client()\n\n try:\n return client.metadata(\n '/Video Automation Platform/jobs/{job}/{job}.png'.format(job=job))\n\n except ErrorResponse:\n return False",
"def check(args, session: Session = NEW_SESSION) -> None:\n if args.allow_multiple and not args.limit > 1:\n raise SystemExit(\"To use option --allow-multiple, you must set the limit to a value greater than 1.\")\n if args.hostname and args.local:\n raise SystemExit(\"You can't use --hostname and --local at the same time\")\n\n query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())\n if args.job_type:\n query = query.where(Job.job_type == args.job_type)\n if args.hostname:\n query = query.where(Job.hostname == args.hostname)\n if args.local:\n query = query.where(Job.hostname == get_hostname())\n if args.limit > 0:\n query = query.limit(args.limit)\n\n alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]\n\n count_alive_jobs = len(alive_jobs)\n if count_alive_jobs == 0:\n raise SystemExit(\"No alive jobs found.\")\n if count_alive_jobs > 1 and not args.allow_multiple:\n raise SystemExit(f\"Found {count_alive_jobs} alive jobs. Expected only one.\")\n if count_alive_jobs == 1:\n print(\"Found one alive job.\")\n else:\n print(f\"Found {count_alive_jobs} alive jobs.\")",
"def check_properties(self):\r\n for prop in self.mandatory_properties:\r\n if not hasattr(self, prop):\r\n raise NameError(prop)",
"def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')",
"def run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()",
"def _setJob_checkShot(shotPath):\n\tvalid = True\n\n\tjobPath = os.path.split(shotPath)[0]\n\t#jobDataDir = os.path.join(jobPath, os.environ['IC_METADATA'])\n\tshotDataDir = os.path.join(shotPath, os.environ['IC_METADATA'])\n\n\t# if not os.path.isdir(jobDataDir):\n\t# \tvalid = False\n\n\tif not os.path.isdir(shotDataDir):\n\t\tvalid = False\n\n\treturn valid",
"def validate_ready_to_run(self):\n super(FlexibleMaster, self).validate_ready_to_run()\n if len(self._job_name_lst) < len(self._step_function_lst) + 1:\n raise ValueError(\"Not enough job names set.\")\n elif len(self._job_name_lst) > len(self._step_function_lst) + 1:\n raise ValueError(\"Not enough step functions set.\")",
"def validate(self):\n validated = True \n # Check that all parameters exist in the self.parameters dictionary\n for param_name in self._SCALAR_PARAMETERS:\n if param_name not in self.parameters:\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False \n \n for param_name in self._TABLE_PARAMETERS:\n if not all([elem for elem in self.parameters[param_name]]):\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False\n \n return validated",
"def job_has_params(job_url):\n name = job_url.rstrip(\"/\").rsplit(\"/\")[-1]\n if name in (\n \"pr-docs\",\n \"pr-lint\",\n \"pr-pre-commit\",\n ):\n return False\n else:\n return True",
"def checkJobsDict(self):\n if not hasattr(self.DB.meta,'peatsa_jobs'):\n from ZODB.PersistentMapping import PersistentMapping\n self.DB.meta.peatsa_jobs = PersistentMapping()",
"def _check_config(self):",
"def readjob(job):\n with open(job) as f:\n try:\n jobdefn = json.load(f)\n except ValueError as e:\n job_logger.error('Cannont load job json: %s', e)\n return False\n else:\n try:\n validate(instance=jobdefn, schema=jobschema)\n except Exception as e:\n logecho(str(e), level='error')\n return False\n else:\n return jobdefn",
"def check_config(self):\n # Check if tool is at all included in workflow\n if \"gatk_post_bam\" not in (self.config[\"postprocessing\"] or []): # pylint: disable=C0325\n return # GATK BAM postprocessing not enabled, skip\n\n # Check required configuration settings present\n self.parent.ensure_w_config(\n config_keys=(\"step_config\", \"ngs_mapping\", \"gatk_post_bam\", \"paths_known_sites\"),\n msg=\"Known sites list cannot be empty for GATK BAM postprocessing\",\n )\n self.parent.ensure_w_config(\n config_keys=(\"static_data_config\", \"reference\", \"path\"),\n msg=\"Path to reference FASTA required for GATK BAM postprocessing\",\n )",
"def check_produce_params(self):\n if self.has_error():\n return False\n\n if not isinstance(self.produce_params, dict):\n self.add_err_msg('produce_params must be a python dict')\n return False\n\n # Iterate through the expectd keys\n #\n expected_keys = [ta2_static.KEY_FITTED_SOLUTION_ID, 'inputs',\n 'exposeOutputs', 'exposeValueTypes']\n\n for key in expected_keys:\n if key not in self.produce_params:\n user_msg = ('produce_params for pipeline \"%s\" is missing key: %s') % \\\n (self.pipeline_id, key)\n self.send_websocket_err_msg(ta2_static.PRODUCE_SOLUTION, user_msg)\n return False\n\n return True",
"def _hasValuesCheckerWrapper(self, args):\n \n constraints = args['constraints']\n \n def _hasValuesChecker(entity, params):\n \"\"\"Checks if values of specified properties of an entity are in\n given sets. \n \"\"\"\n \n for key, values in constraints.iteritems():\n if entity.__getattribute__(key) not in values:\n return False\n\n return True\n\n return _hasValuesChecker",
"def test_validate_no_batch_name(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_samples = [\n {\n \"samples\": [\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_samples)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)",
"def check_data(self):\n\n missing_params = {}\n flag = False\n\n missing_params['general'] = {}\n for name, param in self.params.items():\n if not param.check():\n missing_params['general'][name] = param.get_description()\n flag = True\n\n for component, comp_obj in self.components.items():\n missing_params[component], flag_comp = comp_obj.check_data()\n\n # Assign empty component parameters that have a general version:\n empty_general_params = set(missing_params[component]).intersection(\n set(self.params))\n for param in empty_general_params:\n comp_obj.change_param_object(param, self.params[param])\n del missing_params[component][param]\n\n if missing_params[component]:\n flag = True\n\n if flag:\n raise Exception('Following parameters are missing:\\n{}'\n .format(\n self._print_params(missing_params, disp=False)))\n\n return True",
"def validate_metadata(self):\n metadata = self.get_client_metadata()\n\n return True"
] | [
"0.5902173",
"0.5750896",
"0.56742275",
"0.5597777",
"0.5589587",
"0.54979694",
"0.5465641",
"0.5424713",
"0.5351139",
"0.53396976",
"0.5328847",
"0.53148127",
"0.5310186",
"0.52777135",
"0.5270606",
"0.5264659",
"0.526109",
"0.52511007",
"0.5231011",
"0.52110624",
"0.5206256",
"0.5156965",
"0.5138121",
"0.5126543",
"0.5109919",
"0.51073426",
"0.51011795",
"0.51006186",
"0.50935495",
"0.50912243"
] | 0.5885001 | 1 |
Create a new DB table for the DataFrame | def create_db_dataframe(self, df, table_name):
try:
print("-I- Writing " + table_name + " with DataFrame")
df.to_sql(name=table_name, con=self.engine, if_exists='replace', index=True)
print("-I- Write complete.")
except Exception as e:
print("-W- " + str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())",
"def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)",
"def db_create_table(db_in, tablename):\n connection = db_in.connection.cursor()\n connection.execute('CREATE TABLE IF NOT EXISTS %s(id INTEGER PRIMARY KEY);' % tablename)",
"def table_from_frame(self, frame, table_name, conn=None, if_exists='fail', index=False,\n index_label=None, schema=None, chunksize=None, copy=True):\n \n table = SQLTable(table_name, self, frame=frame, table_setup=True, index=index,\n if_exists=if_exists, index_label=index_label, schema=schema)\n \n table.create()\n \n # check for potentially case sensitivity issues (GH7815)\n if table_name not in self.engine.table_names(schema=schema or self.meta.schema):\n warnings.warn(\"The provided table name '{0}' is not found exactly \"\n \"as such in the database after writing the table, \"\n \"possibly due to case sensitivity issues. Consider \"\n \"using lower case table names.\".format(name), UserWarning)\n \n \n table.insert(conn=conn, bulk=True, chunksize=chunksize, copy=copy)",
"def create_table_in_sqlite_db(self):\n with self.con:\n cur = self.con.cursor()\n cur.execute(\"\"\"DROP TABLE IF EXISTS {};\"\"\".format(self.table_name))\n base_create_query = \"\"\"CREATE TABLE {}({}, PRIMARY KEY ({}));\"\"\"\n columns = ','.join(['{} {}'.format(col, self.columns_types[col]) for col in self.table_columns])\n primary_keys = ','.join(['{}'.format(col) for col in self.table_primary_keys])\n create_query = base_create_query.format(self.table_name, columns, primary_keys)\n cur.execute(create_query)\n self.con.commit()",
"def create_table():\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(CREATE_TABLE)\n conn.commit()\n cursor.close()",
"def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )",
"def create_table(self):\n\n # Get columns\n columns = []\n for i, (name, type_) in enumerate(self.schema.items()):\n if 'sqlalchemy' in str(type(type_)):\n pass\n else:\n type_ = str(type_).lower()\n\n if 'int' in type_:\n type_ = sqlalchemy.Integer\n elif 'float' in type_:\n type_ = sqlalchemy.Float\n elif 'bool' in type_:\n type_ = sqlalchemy.Boolean\n elif 'timestamp' in type_:\n type_ = sqlalchemy.TIMESTAMP\n elif 'varchar' in type_ or 'str' in type_:\n type_ = sqlalchemy.VARCHAR\n elif 'json' in type_:\n type_ = sqlalchemy.JSON\n elif 'datetime' in type_:\n type_ = sqlalchemy.DateTime\n elif 'date' in type_:\n type_ = sqlalchemy.Date\n else:\n raise Exception(f\"Column type {type_} not supported when creating a new table\")\n\n columns.append(sqlalchemy.Column(name, type_))#, primary_key=True))\n\n columns = tuple(columns)\n table = sqlalchemy.Table(\n self.table, self.metadata,\n *columns\n )\n self.metadata.create_all(self.engine)",
"def table_to_df(db_name, table_name):\n return sqlContext.table(\"{0}.{1}\".format(db_name, table_name))",
"def create_table(self):\n Engine.create_table(self)\n self.connection.commit()",
"def data_table_creation(cursor, connection_to_db):\n\n cursor.execute(\"\"\"\n\n CREATE TABLE IF NOT EXISTS data(\n question TEXT NOT NULL,\n answer TEXT NULL,\n question_type TEXT NOT NULL,\n question_type_answers TEXT NULL,\n PRIMARY KEY(question)\n );\n\n \"\"\")\n\n connection_to_db.commit()",
"def create_table(create_table_sql):\n conn = DbUtil.connection\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)",
"def create(db, table, name):\n columns = ['{0} {1}'.format(name, ctype) for name, ctype in table.items()]\n create = CREATE_TABLE.format(table=name, fields=\", \".join(columns))\n try:\n db.execute(create)\n except Exception as e:\n print(create)\n print(\"Failed to create table: \" + e)",
"def df2db(self, df: pd.DataFrame, tab_name):\n\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n df.to_sql(tab_name, self.engine, method='multi', index=False)",
"def create_table(self):\n logging.debug('Creating new table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n self._cursor.execute(\"create table {} (date text, time text, location text, nodeID text)\".format(self._name))",
"def create_table():\n with create_connection() as conn:\n cur = conn.cursor()\n cur.execute(query=SQL_STATEMENT)\n return conn",
"def create_table(self):\n pass",
"def add_to_database(self, df):\n \n from sqlalchemy import create_engine\n \n engine = create_engine(\"mysql://dublinbus:somepaawsord/researchpracticum\")\n con = engine.connect()\n df.to_sql(con=con, name='TimeTables', if_exists='append')\n con.close()",
"def create_table(cls):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n cursor = connection.execute(cls.create_table_sql())\n cursor.close()",
"def create_new_table():\n dataset = create_dataset()\n table_id = \"{}.{}.corona_cases_table\".format(client.project, dataset.dataset_id)\n table = bigquery.Table(table_id)\n table = client.create_table(table, exists_ok=True)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n return table",
"def create_table():\n\tCURSOR.execute(\"\"\"CREATE TABLE IF NOT EXISTS {} (\n\t\t\t[ID] NVARCHAR NOT NULL PRIMARY KEY,\n\t\t\t[Name] NVARCHAR,\n\t\t\t[Definition] NVARCHAR)\"\"\".format(TABLE_NAME))",
"def create_sqlite_table(self):\n self.print_datetime_output('Connect to data base %s' % self.db_name)\n con = sqlite3.connect(self.db_name)\n cur = con.cursor()\n\n # check if table exists\n cur.execute(\"select count(*) from sqlite_master where type='table' and name='%s'\" % self.db_table)\n if cur.fetchall()[0][0] == 1:\n self.print_datetime_output('Previous table %s was dropped' % self.db_table)\n cur.execute(\"DROP TABLE %s;\" % self.db_table)\n\n self.print_datetime_output('Create table %s and import data from csv file %s' % (self.db_table,\n self.time_series_file_name))\n cur.execute(\"CREATE TABLE %s (timestamp, close_USD);\" % self.db_table)\n\n with open(self.file_name, 'r') as fin:\n dr = csv.DictReader(fin)\n to_db = [(i['timestamp'], i['close (USD)']) for i in dr]\n\n cur.executemany(\"INSERT INTO %s (timestamp, close_USD) VALUES (?, ?);\" % self.db_table, to_db)\n con.commit()\n return con",
"def test_dummydb_new_table(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)",
"def create_table(connection, tablequery):\n cur = connection.cursor()\n cur.execute(tablequery)\n connection.commit()",
"def _get_db_create_table(self, frame):\r\n\r\n columns = (u',\\n'.\r\n\r\n join([u' `%s` DECIMAL(20,5) DEFAULT NULL COMMENT \"%s\"' %\r\n\r\n (self._get_db_name(name), name) for name in\r\n\r\n frame.index.values]))\r\n\r\n table_name = self._get_db_table_name(frame)\r\n\r\n return (\r\n\r\n u'CREATE TABLE `%s` (\\n' % table_name +\r\n\r\n u' `ticker` VARCHAR(50) NOT NULL COMMENT \"Exchange:Ticker\",\\n' +\r\n\r\n u' `period` DATE NOT NULL COMMENT \"Period\",\\n' +\r\n\r\n u'%s,\\n' % columns +\r\n\r\n u' PRIMARY KEY USING BTREE (`ticker`, `period`),\\n' +\r\n\r\n u' KEY `ix_ticker` USING BTREE (`ticker`))\\n' +\r\n\r\n u'ENGINE=MyISAM DEFAULT CHARSET=utf8\\n' +\r\n\r\n u'COMMENT = \"%s\"' % frame.index.name)",
"def create_table(table_name:str, database_name:str='dars_nic_391419_j3w9t_collab', select_sql_script:str=None) -> None:\n \n spark.conf.set(\"spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation\",\"true\")\n \n if select_sql_script is None:\n select_sql_script = f\"SELECT * FROM global_temp.{table_name}\"\n \n spark.sql(f\"\"\"CREATE TABLE {database_name}.{table_name} AS\n {select_sql_script}\n \"\"\")\n spark.sql(f\"ALTER TABLE {database_name}.{table_name} OWNER TO {database_name}\")",
"def create_table(engine, csv_filename, tablename):\n # Read csv file and changes all column names to be lowercase\n csv_df = pd.read_csv(f'./data/{csv_filename}.csv')\n csv_df.columns = [c.lower() for c in csv_df.columns]\n\n # Change date types to datetime\n todateformat = []\n for c in csv_df.columns:\n if \"date\" in c:\n csv_df[c] = csv_df[c].astype('datetime64[ns]')\n\n # Create/replace table with tablename in db\n csv_df.to_sql (tablename, engine, if_exists='replace', index=False)",
"def create_table(self, create_table_sql):\n connection = self.__create_connection()\n try:\n c = connection.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)",
"def df2db(self, df: pd.DataFrame, tab_name, append=False):\n if append:\n df.to_sql(name=tab_name, con=self.engine, if_exists='append', index=False)\n else:\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n df.to_sql(name=tab_name, con=self.engine, if_exists='fail', index=False)",
"def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n id INT UNSIGNED NOT NULL AUTO_INCREMENT,\n name VARCHAR(140) NOT NULL,\n PRIMARY KEY (id)\n )\n \"\"\")"
] | [
"0.77422994",
"0.7363433",
"0.7361235",
"0.73389554",
"0.7315704",
"0.7284163",
"0.7265219",
"0.71784484",
"0.71592116",
"0.7123823",
"0.7109681",
"0.7104714",
"0.7104014",
"0.70893764",
"0.7065188",
"0.7056585",
"0.70464426",
"0.7045202",
"0.7043078",
"0.70418227",
"0.7035203",
"0.70081115",
"0.69982046",
"0.69817793",
"0.6969021",
"0.6954411",
"0.6952942",
"0.6951786",
"0.690065",
"0.6900537"
] | 0.7874062 | 0 |
Appends DataFrame to the specified table | def append_db_dataframe(self, df, table_name):
try:
print("-I- Appending " + table_name + " with DataFrame")
df.to_sql(name=table_name, con=self.engine, if_exists='append', index=True)
print("-I- Append complete.")
except Exception as e:
print("-W- " + str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append_data(self, table_name, df):\n\t\tself.__check_colnames(table_name, df)\n\t\tif self.__dbfile is not None:\n\t\t\tdf.to_sql(table_name, self._conn, index=False, if_exists=\"append\")",
"def append_table(self, table):\n if not table:\n return\n\n indexes = []\n for idx in table.index:\n index = self.size + idx\n indexes.append(index)\n\n self.set(indexes=indexes, columns=table.columns, values=table.data)",
"def appendData(self, dataframe, tableName, truncate=False):\n if truncate:\n truncateSetting = 'replace'\n else:\n truncateSetting = 'append'\n dataframe.to_sql(name=tableName, con=self.writeConn, if_exists=truncateSetting, index=False)",
"def add_table(self, table, data_frame, alias=None):\n if alias is not None:\n for name in alias:\n self.alias_map[name] = table\n self.alias_map[table] = table\n self.data_frames[table] = data_frame",
"def table_save_data_frame(self, table_name):\n self.recordset_df = pd.read_sql_table(table_name, self.con)\n return self",
"def store_partial_df(df, table_name):\r\n cursor = hana.cursor()\r\n pbar = tqdm(total=len(df.index))\r\n\r\n for index, row in df.iterrows():\r\n pbar.update(1)\r\n statement = 'INSERT INTO \\\"NIKOLAI\\\".\\\"'+table_name+'\\\" ('\r\n for colname in map(str, row.index.tolist()):\r\n statement += '\\\"'+ colname + '\\\",'\r\n statement = statement[:-1] +') VALUES ('\r\n #for value in map(str, row.tolist()):\r\n for value in row.tolist():\r\n if value != value:\r\n statement += 'null,'\r\n elif isinstance(value, int) or isinstance(value, float):\r\n statement += str(value) + ','\r\n else:\r\n statement += '\\''+ str(value) + '\\','\r\n\r\n cursor.execute(statement[:-1] +');')\r\n\r\n pbar.close()\r\n hana.commit()",
"def create_db_dataframe(self, df, table_name):\n try:\n print(\"-I- Writing \" + table_name + \" with DataFrame\")\n df.to_sql(name=table_name, con=self.engine, if_exists='replace', index=True)\n print(\"-I- Write complete.\")\n except Exception as e:\n print(\"-W- \" + str(e))",
"def add_to_database(self, df):\n \n from sqlalchemy import create_engine\n \n engine = create_engine(\"mysql://dublinbus:somepaawsord/researchpracticum\")\n con = engine.connect()\n df.to_sql(con=con, name='TimeTables', if_exists='append')\n con.close()",
"def append_table(lines, table):\n tabulate(table)\n for row in table:\n lines.append('|' + '|'.join(row).rstrip() + '\\n')",
"def append_table(self, table):\n\n self._db_manager.register_table(table)",
"def add_row(self, row):\n \n new_row = pd.DataFrame(data=[row], columns = self.table.columns) \n self.table = self.table.append(new_row, ignore_index=True)",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def insert_df(conn, table_name: str, df: pd.DataFrame):\n # To CSV\n output = StringIO()\n df.to_csv(output, sep='\\t', header=False)\n output.seek(0)\n\n # Insert data\n cursor = conn.cursor()\n\n if isinstance(df.index, pd.MultiIndex):\n columns = list(df.index.names) + list(df.columns)\n else:\n columns = [df.index.name] + list(df.columns)\n\n cursor.copy_from(output, table_name, sep='\\t', null='', columns=columns)\n conn.commit()\n cursor.close()",
"def add_table(self, table: pd.DataFrame, table_name: str, table_description: str) -> None:\n\n columns = table.columns\n\n columns_lists = []\n for i in range(0, len(columns), self.table_columns_count):\n columns_list = []\n for j in range(i, len(columns)):\n columns_list.append(columns[j])\n if(len(columns_list) == self.table_columns_count):\n break\n columns_lists.append(columns_list)\n\n tag = r\"\"\n for i in range(len(columns_lists)):\n columns_list = columns_lists[i]\n\n if(len(columns_lists) == 1):\n table_num = r''\n else:\n table_num = r' [' + str(i + 1) + r'/' + str(len(columns_lists)) + r']'\n\n if(len(table) < 30):\n tag = tag + r'''\n \\begin{table}[H]\n \\center\n \\caption{''' + table_description + table_num + '''}\n \\label{tab:''' + table_name + str(len(self.tables)) + r'''}\n \\begin{tabular}{c''' + ' c' * len(columns_list) + '''}\n '''\n else:\n tag = tag + r'''\n \\begin{longtable}{''' + 'c ' + ' c' * len(columns_list) + '''}\n \\caption{''' + table_description + table_num + '''\\label{tab:''' + table_name + str(len(self.tables)) + r'''}}\\\\\n '''\n\n cell = str(columns_list[0])\n\n for column in columns_list[1:]:\n cell = cell + r' & ' + str(column)\n tag = tag + cell + r''' \\\\\n\n \\hline\n '''\n\n for j in range(len(table)):\n cell = str(table[columns_list[0]].values[j])\n\n for column in columns_list[1:]:\n cell = cell + r' & ' + str(table[column].values[j])\n\n tag = tag + cell + r''' \\\\\n '''\n\n if(len(table) < 30):\n tag = tag + r'''\n \\hline\n \\end{tabular}\n \\end{table}\n '''\n else:\n tag = tag + r'''\n \\hline\n \\end{longtable}\n '''\n\n self.tables[len(self.tables)] = [table_name, table_description]\n\n tag = tag.replace('%', '\\%').replace('_', '\\_').replace('#', '\\#')\n\n self.doc = self.doc + tag",
"def AddTable(self, table):\n self.tables.append(table)",
"def add_to_table(self, values_to_report, table_headers):\n row = Series(dict(zip(\n table_headers,\n values_to_report\n )))\n self.configuration.results = self.configuration.results.append(\n row, ignore_index=True)",
"def insert_df(df, cur, table):\n\n df_columns = list(df)\n\n string_buffer = io.StringIO()\n df.to_csv(string_buffer, index=False, header=False, sep='|')\n string_buffer.seek(0)\n\n tmp_table = \"tmp_table\"\n\n cur.execute(\n f\"\"\"\n CREATE TEMP TABLE {tmp_table}\n AS\n SELECT * \n FROM {table}\n WITH NO DATA\n \"\"\"\n )\n\n cur.copy_from(file=string_buffer, table=tmp_table, sep='|', null=\"\", columns=df_columns)\n\n cur.execute(\n f\"\"\"\n INSERT INTO {table}\n SELECT *\n FROM {tmp_table}\n ON CONFLICT DO NOTHING\n \"\"\"\n )\n\n cur.execute(\n f\"\"\"\n DROP TABLE {tmp_table}\n \"\"\"\n )",
"def add_data(self, df):\n # TODO: improve merging code\n self.data = self.data.append(df, ignore_index=False)\n self.data = self.data[~self.data.index.duplicated(keep='first')]",
"def write_tde(table_df, tde_fullpath, arg_append):\n if arg_append and not os.path.isfile(tde_fullpath):\n print \"Couldn't append -- file doesn't exist\"\n arg_append = False\n\n # Remove it if already exists\n if not arg_append and os.path.exists(tde_fullpath):\n os.remove(tde_fullpath)\n tdefile = tde.Extract(tde_fullpath)\n\n # define the table definition\n table_def = tde.TableDefinition()\n \n # create a list of column names\n colnames = table_df.columns\n # create a list of column types\n coltypes = table_df.dtypes\n\n # for each column, add the appropriate info the Table Definition\n for col_idx in range(0, len(colnames)):\n cname = colnames[col_idx]\n ctype = fieldMap[str(coltypes[col_idx])]\n table_def.addColumn(cname, ctype) \n\n # create the extract from the Table Definition\n if arg_append:\n tde_table = tdefile.openTable('Extract')\n else:\n tde_table = tdefile.addTable('Extract', table_def)\n row = tde.Row(table_def)\n\n for r in range(0, table_df.shape[0]):\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, table_df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n tde_table.insert(row)\n\n tdefile.close()\n print \"Wrote %d lines to %s\" % (len(table_df), tde_fullpath)",
"def add_table_to_hdf(self, run_group, type_dict, data, name = 'bla',filename = []):\n\t\tif filename == []:\n\t\t\tfilename = self.edf_operator.inputFileName\n\t\t\t\n\t\tthis_table = self.h5f.createTable(run_group, name, type_dict, '%s in file %s' % (name, self.edf_operator.inputFileName))\n\t\t\n\t\trow = this_table.row\n\t\tfor r in data:\n\t\t\tfor par in r.keys():\n\t\t\t\trow[par] = r[par]\n\t\t\trow.append()\n\t\tthis_table.flush()",
"def glue_table(name: str, df: pd.DataFrame, build_path=\"_build\"):\n\n if not os.path.exists(build_path):\n os.mkdir(build_path)\n df.to_excel(os.path.join(build_path, f\"{name}.xlsx\"))\n\n glue(name, df)",
"def export_sql(self, table_name, engine=engine):\n self.table.to_sql(name=table_name, \n con=engine, \n if_exists='append')\n self.table = pd.DataFrame()",
"def chart_data_table(self, chart_data_table):\n\n self.container['chart_data_table'] = chart_data_table",
"def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)",
"def append_row(row: pd.DataFrame, df: pd.DataFrame, to_top=True):\n # return pd.concat([row,df], keys=list(get_player_dict().keys())) # persist player_dict so don't have to call func each time\n return df.append(row)",
"def write_frame(self, file, table):\n\n frame = pd.read_csv(file, usecols=self.columns[table])\n frame.fillna('-', inplace=True)\n\n for line in frame.index:\n\n available = []\n accum = 0\n for item in frame.loc[line]:\n if item not in self.placeholder:\n available.append(accum)\n accum = accum + 1\n\n if table == 'premium' and len(available) <= 2:\n # Premium table is full of null\n continue\n\n # Filter the key-value pairs\n key = [frame.columns[column] for column in available]\n keys = ','.join(key)\n value = ['\\'' + str(frame.loc[line][i]) + '\\'' for i in available]\n values = ','.join(value)\n\n insert_query = 'INSERT INTO public.%s ' \\\n '(%s) ' \\\n 'VALUES (%s);' \\\n % (table, keys, values)\n try:\n self.cursor.execute(insert_query)\n self.conn.commit()\n except ps.Error as e:\n # Ignore errors\n self.errors = self.errors + 1\n self.conn.commit()\n continue\n\n self.conn.commit()\n self.cursor.close()",
"def add_data(self, key, data):\n with self.write():\n # index all columns if possible\n try:\n # FIXME: band-aid heuristic to catch a known corner case that\n # HDFStore doesn't catch; see ``Issue 20``\n if (isinstance(data, pd.DataFrame) and\n data.columns.dtype == np.dtype('int64')):\n raise AttributeError\n\n self.handle.put(\n key, data, format='table', data_columns=True, complevel=5,\n complib='blosc')\n except AttributeError:\n self.handle.put(\n key, data, format='table', complevel=5, complib='blosc')",
"def transfers_dataframe(tables_list):\r\n return pd.concat([pd.DataFrame(table[1:], columns=table[0]) for table in tables_list])",
"def add(table, record):\n\n table.append(record)\n\n return table",
"def table(self, *tables):\n self._tables.append(', '.join(tables))\n return self"
] | [
"0.7775088",
"0.7292738",
"0.71129256",
"0.6842862",
"0.67572284",
"0.65789384",
"0.6511594",
"0.6447779",
"0.6344674",
"0.6329825",
"0.6316417",
"0.63103884",
"0.63037944",
"0.62382275",
"0.6213505",
"0.6189217",
"0.6157169",
"0.6135792",
"0.60964775",
"0.60880053",
"0.606603",
"0.60388714",
"0.6035828",
"0.6007141",
"0.59908026",
"0.59442496",
"0.59209865",
"0.59156907",
"0.5865138",
"0.5859505"
] | 0.7644748 | 1 |
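
A minimal sketch of the pandas to_sql behaviour that the two DataFrame helpers above rely on: if_exists='replace' recreates the table (as in create_db_dataframe), while 'append' adds rows to an existing one (as in append_db_dataframe). The in-memory SQLite engine and the "trades" table name are illustrative assumptions, not values taken from the dataset rows.

import pandas as pd
from sqlalchemy import create_engine

# Illustrative in-memory SQLite engine; any SQLAlchemy URL behaves the same way.
engine = create_engine("sqlite:///:memory:")

df = pd.DataFrame({"price": [1.5, 2.0]}, index=pd.Index([1, 2], name="id"))

# 'replace' drops and recreates the table, mirroring create_db_dataframe.
df.to_sql(name="trades", con=engine, if_exists="replace", index=True)

# 'append' inserts additional rows into the existing table, mirroring append_db_dataframe.
df.to_sql(name="trades", con=engine, if_exists="append", index=True)

print(pd.read_sql_table("trades", engine))  # four rows after both calls
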
test_save_contact test case to check whether the contact object is saved into the contact list | def test_save_contact(self):
        # .save_contact() saves the contact.
        # The test checks that an addition has been made to the contact list.
self.new_contact.save_contact()
self.assertEqual(len(Contact.contact_list), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_save_contact(self):\n self.new_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)",
"def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)",
"def test_add_contacts(self):\n response = self.contacts.add(\"alex\", \"0708913841\")\n self.assertEqual(response, \"Successfully added contacts\" )",
"def test_delete_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0745639300\", \"[email protected]\")\n # new contact saved\n test_contact.save_contact()\n # For deleting the new contact\n self.new_contact.delete_contact()\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])",
"def test_contact_exists(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n contact_exists = Contact.contact_exist(254711223344)\n self.assertTrue(contact_exists)",
"def test_delete_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new contact\n test_contact.save_contact()\n self.new_contact.delete_contact() # delete a contact object\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_find_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n found_contact = Contact.find_by_phone(254711223344)\n\n self.assertEqual(found_contact.email, test_contact.email)",
"def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']",
"def test_edit_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n contact_list = ContactList.objects.first()\n data = ContactListSerializer(contact_list).data\n\n data['title'] = 'Nestle'\n data['contact_ids'] = [c1.id]\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'Nestle')\n self.assertEqual(content['contacts'], [c1.id])",
"def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']",
"def test_new_contact_data(db_session):\n new_contact = AddressBook(\n name=\"test_name\",\n phone=\"test_phone\",\n email=\"test_email\"\n )\n db_session.add(new_contact)\n contact = db_session.query(AddressBook).all()\n assert contact[0].name == \"test_name\"\n assert contact[0].phone == \"test_phone\"\n assert contact[0].email == \"test_email\"",
"def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)",
"def test_contact_exists(self):\n\n self.new_contact.save_contact()\n # Test user\n test_contact = Contact(\"Test\", \"user\", \"0722334455\", \"[email protected]\")\n # We save\n test_contact.save_contact()\n # variable that stores what we expect\n contact_exists = Contact.contact_exist(\"0722334455\")\n # The test that should return a variable\n self.assertTrue(contact_exists)",
"def test_that_view_saves_data_if_form_valid(self):\n\n self.client.login(username='admin', password='admin')\n url = reverse(\"to_form\", args=str(self.my_instance.id))\n response = self.client.post(url, data={'name': 'Oleg', 'surname': 'Senyshyn', 'date': date(1995, 05, 03),\n 'email': '[email protected]', 'skype': 'sen9a1990'}, format='json')\n self.assertEqual('Data has been edit', json.loads(response.content)['ok'])\n my_instance = Contact.objects.first()\n self.assertEqual('Oleg', my_instance.name)\n self.assertEqual('Senyshyn', my_instance.surname)\n self.assertEqual(date(1995, 05, 03), my_instance.date)\n self.assertEqual('[email protected]', my_instance.email)\n self.assertEqual('sen9a1990', my_instance.skype)",
"def test_new_contact_is_added(db_session):\n new_contact = AddressBook(\n name=\"test_name\",\n phone=\"test_phone\",\n email=\"test_email\"\n )\n db_session.add(new_contact)\n query = db_session.query(AddressBook).all()\n assert len(query) == 1",
"def test_create_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n data = {\n 'title': 'ContactList1',\n 'contact_ids': [c1.id],\n }\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'ContactList1')\n self.assertEqual(content['contacts'], [c1.id])\n self.assertNotEqual(content['company_id'], None)\n self.assertNotEqual(content['owner'], None)\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.contact_lists_count+1, len(content))",
"def test_updateContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)",
"def test_save_account(self):\n self.new_account.save_account() # add account to list\n self.assertEqual(len(Credential.credential_list),\n 1) # check length of list",
"def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)",
"def test_update_contact_association(self):\n patient1 = self.create_patient({'mobile_number': '12223334444'})\n patient2 = self.create_patient()\n subject_number = patient1.subject_number\n node = self.create_xml_patient({'Subject_Number': subject_number,\n 'Mobile_Number': '43332221111'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertNotEqual(patient.pk, patient2.pk)\n self.assertEqual(patient.pk, patient1.pk)\n self.assertNotEqual(patient.contact.pk, patient2.contact.pk)\n self.assertEqual(patient.contact.pk, patient1.contact.pk)\n self.assertEqual(patient.mobile_number, '+43332221111')\n self.assertEqual(patient.contact.phone, '+43332221111')",
"def test_01_add_person_to_book(self):\n data = {\"first_name\": \"Nidhin\", \"last_name\": \"Bose\",\n \"street_address\": [\"street number 1\", \"street number 2\"],\n \"email\": [\"[email protected]\", \"[email protected]\"],\n \"phone\": [\"123123123\", \"345345345\"]}\n\n rv = self.app.post('/address_book/person',\n data=json.dumps(data),\n follow_redirects=True)\n data = json.loads(rv.data)\n self.assertEqual(data[\"message\"], \"saved person to Address Book\")",
"def test_save_credential(self):\n self.new_credential.save_credential() # saving the new credential\n self.assertEqual(len(Credential.credential_list),1)",
"def test_save_credential(self):\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def test_modify_phonebook(self):\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n if not self.connect_and_verify(phone_numbers_added):\n return False\n\n bt_contacts_utils.erase_contacts(self.pse)\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 110, 2)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n return self.connect_and_verify(phone_numbers_added)",
"def test_get_contact(self):\n pass",
"def test_save_credential(self) :\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def test_get_contacts(self):\n pass",
"def test_findContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact, contact2)"
] | [
"0.90264815",
"0.8396352",
"0.8371293",
"0.7489064",
"0.73771274",
"0.7365106",
"0.7324921",
"0.730443",
"0.7194348",
"0.7193277",
"0.7079255",
"0.70014966",
"0.69934404",
"0.6915084",
"0.691342",
"0.6848425",
"0.68055904",
"0.67810017",
"0.67704403",
"0.67631304",
"0.66917706",
"0.65671664",
"0.65447736",
"0.64902884",
"0.64709234",
"0.6422111",
"0.64126015",
"0.64011127",
"0.6381512",
"0.6330765"
] | 0.8957478 | 1 |
test_save_multiple_contact test case to check that we can save multiple contacts to our contact_list | def test_save_multiple_contact(self):
self.new_contact.save_contact()
# new contact
test_contact = Contact("Test", "user", "0798765432", "[email protected]")
test_contact.save_contact()
self.assertEqual(len(Contact.contact_list), 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)",
"def test_save_contact(self):\n # .save_contact() is the save to contact function.\n # Test would check if an addition has been made to our contact list\n self.new_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_save_contact(self):\n self.new_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_with_multiple_contacts(self, data_flow_api_client):\n with freeze_time('2019-01-01 12:30:00'):\n contact_1 = ContactFactory()\n with freeze_time('2019-01-03 12:00:00'):\n contact_2 = ContactFactory()\n with freeze_time('2019-01-01 12:00:00'):\n contact_3 = ContactFactory()\n contact_4 = ContactFactory()\n\n response = data_flow_api_client.get(self.view_url)\n assert response.status_code == status.HTTP_200_OK\n response_results = response.json()['results']\n assert len(response_results) == 4\n expected_contact_list = sorted([contact_3, contact_4],\n key=lambda item: item.pk) + [contact_1, contact_2]\n for index, contact in enumerate(expected_contact_list):\n assert contact.email == response_results[index]['email']",
"def test_create_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n data = {\n 'title': 'ContactList1',\n 'contact_ids': [c1.id],\n }\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n \n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'ContactList1')\n self.assertEqual(content['contacts'], [c1.id])\n self.assertNotEqual(content['company_id'], None)\n self.assertNotEqual(content['owner'], None)\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.contact_lists_count+1, len(content))",
"def test_add_contacts(self):\n response = self.contacts.add(\"alex\", \"0708913841\")\n self.assertEqual(response, \"Successfully added contacts\" )",
"def test_edit_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n contact_list = ContactList.objects.first()\n data = ContactListSerializer(contact_list).data\n\n data['title'] = 'Nestle'\n data['contact_ids'] = [c1.id]\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'Nestle')\n self.assertEqual(content['contacts'], [c1.id])",
"def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"Facebook\",\"Chris\",\"[email protected]\",\"chris1\") # new credential\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"winnie\",\"test\",\"login\",\"winnie\")\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_save_multiple_credential(self) :\n self.new_credential.save_credential()\n test_credential = Credential(\"Instagram\", \"[email protected]\", \"Insta002\") #new credential\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)",
"def test_create_multiple(self):\n contact = Contact.objects.first()\n sales_cycle = contact.sales_cycles.first()\n valid_data = [{'sales_cycle_id':sales_cycle.id, 'description':'test message', 'contact_id': contact.id}]\n url, parsed = self.prepare_urls('v1:activity-create-multiple', subdomain=self.company.subdomain)\n \n response = self.client.post(url, valid_data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, valid_data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertTrue(content.has_key('notification'))",
"def test_get_contacts(self):\n pass",
"def test_delete_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0745639300\", \"[email protected]\")\n # new contact saved\n test_contact.save_contact()\n # For deleting the new contact\n self.new_contact.delete_contact()\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_save_multiple_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n self.assertEqual(len(User.user_list), 2)",
"def test_save_credential(self):\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def test_save_account(self):\n self.new_account.save_account() # add account to list\n self.assertEqual(len(Credential.credential_list),\n 1) # check length of list",
"def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']",
"def test_modify_phonebook(self):\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n if not self.connect_and_verify(phone_numbers_added):\n return False\n\n bt_contacts_utils.erase_contacts(self.pse)\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 110, 2)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n return self.connect_and_verify(phone_numbers_added)",
"def test_save_multiple_accounts(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n self.assertEqual(len(Credentials.credentials_list), 2)",
"def test_delete_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new contact\n test_contact.save_contact()\n self.new_contact.delete_contact() # delete a contact object\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])",
"def test_save_credential(self) :\n self.new_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),1)",
"def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)",
"def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']",
"def test_get_contact_objects(self):\n\n contacts = MessageController.get_contact_objects(['2'])\n self.assertEqual(contacts[0].contact_first_name, 'Contact2')\n self.assertEqual(contacts[0].contact_phone, '4153417706')\n self.assertEqual(contacts[0].user_id, 1)\n self.assertEqual(contacts[0].lang_id, 1)",
"def test_validate_form_import_contacts(self):\n data_contacts = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n with data_contacts as contacts:\n response = self.client.post(reverse(\"import_contacts\"), {\n 'contacts': contacts})\n data_contacts.close()\n self.assertEqual(response.status_code, 200)",
"def test_get_people_list(self):\n person_1 = Person(\n first_name='Emilia',\n last_name='Clarke',\n aliases='Emi'\n )\n person_2 = Person(\n first_name='Peter',\n last_name='Dinklage',\n )\n person_3 = Person(\n first_name='Thomas',\n last_name='McCarthy',\n aliases='Thom'\n )\n\n Person.objects.bulk_create([person_1, person_2, person_3])\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('count'), Person.objects.count())",
"def test_multiple_phones(self):\n\n PSE1_CONTACTS_FILE = \"{}{}\".format(PSE_CONTACTS_FILE, \"1\")\n PSE2_CONTACTS_FILE = \"{}{}\".format(PSE_CONTACTS_FILE, \"2\")\n\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE1_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE1_CONTACTS_FILE)\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE2_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse2, self.contacts_destination_path, PSE2_CONTACTS_FILE)\n\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse.droid.bluetoothGetLocalAddress())\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse2.droid.bluetoothGetLocalAddress())\n\n bt_test_utils.connect_pri_to_sec(\n self.pce, self.pse,\n set([BtEnum.BluetoothProfile.PBAP_CLIENT.value]))\n bt_contacts_utils.wait_for_phone_number_update_complete(self.pce, 100)\n bt_contacts_utils.export_device_contacts_to_vcf(\n self.pce, self.contacts_destination_path, PCE_CONTACTS_FILE)\n pse1_matches = bt_contacts_utils.count_contacts_with_differences(\n self.contacts_destination_path, PCE_CONTACTS_FILE,\n PSE1_CONTACTS_FILE) == 0\n\n bt_test_utils.connect_pri_to_sec(\n self.pce, self.pse2,\n set([BtEnum.BluetoothProfile.PBAP_CLIENT.value]))\n\n bt_contacts_utils.wait_for_phone_number_update_complete(self.pce, 200)\n\n bt_contacts_utils.export_device_contacts_to_vcf(\n self.pce, self.contacts_destination_path, PCE_CONTACTS_FILE)\n\n merged_file = open('{}{}'.format(self.contacts_destination_path,\n MERGED_CONTACTS_FILE), 'w')\n for contacts_file in [PSE1_CONTACTS_FILE, PSE2_CONTACTS_FILE]:\n infile = open(self.contacts_destination_path + contacts_file)\n merged_file.write(infile.read())\n\n self.log.info(\"Checking combined phonebook.\")\n pse1andpse2_matches = bt_contacts_utils.count_contacts_with_differences(\n self.contacts_destination_path, PCE_CONTACTS_FILE,\n MERGED_CONTACTS_FILE) == 0\n\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse.droid.bluetoothGetLocalAddress())\n bt_contacts_utils.wait_for_phone_number_update_complete(self.pce, 100)\n\n self.log.info(\"Checking phonebook after disconnecting first device.\")\n bt_contacts_utils.export_device_contacts_to_vcf(\n self.pce, self.contacts_destination_path, PCE_CONTACTS_FILE)\n pse2_matches = bt_contacts_utils.count_contacts_with_differences(\n self.contacts_destination_path, PCE_CONTACTS_FILE,\n PSE2_CONTACTS_FILE) == 0\n\n bt_contacts_utils.erase_contacts(self.pse)\n bt_contacts_utils.erase_contacts(self.pse2)\n return pse1_matches and pse2_matches and pse1andpse2_matches",
"def test_display_all_contact(self):\n self.assertEqual(Contact.display_contacts(), Contact.contact_list)",
"def tearDown(self):\n Contact.contact_list = []"
] | [
"0.91369456",
"0.8120368",
"0.80584645",
"0.73653406",
"0.70864254",
"0.70563537",
"0.7039114",
"0.70254576",
"0.6958467",
"0.68221486",
"0.68147147",
"0.6792331",
"0.66435987",
"0.65641314",
"0.6561918",
"0.65477175",
"0.6451238",
"0.6439873",
"0.64368606",
"0.6408157",
"0.6404192",
"0.6382228",
"0.6374698",
"0.6343167",
"0.633899",
"0.63303936",
"0.63246804",
"0.63154143",
"0.63137",
"0.63136184"
] | 0.9152661 | 0 |
test_delete_contact to test if we can remove a contact from our contact list | def test_delete_contact(self):
self.new_contact.save_contact()
# new contact
test_contact = Contact("Test", "user", "0745639300", "[email protected]")
# new contact saved
test_contact.save_contact()
# For deleting the new contact
self.new_contact.delete_contact()
self.assertEqual(len(Contact.contact_list), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new contact\n test_contact.save_contact()\n self.new_contact.delete_contact() # delete a contact object\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_delete_contact_list(self):\n contact_list = ContactList.objects.first()\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.delete(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.delete(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.contact_lists_count-1, len(content))",
"def test_projects_id_contacts_delete(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_delete_contact_org_link(session, auth_mock): # pylint:disable=unused-argument\n entity_model = factory_entity_model()\n entity = EntityService(entity_model)\n\n org = factory_org_service()\n org_dictionary = org.as_dict()\n org_id = org_dictionary['id']\n\n contact = factory_contact_model()\n\n contact_link = ContactLinkModel()\n contact_link.contact = contact\n contact_link.entity = entity._model # pylint:disable=protected-access\n contact_link.org = org._model # pylint:disable=protected-access\n contact_link.commit()\n\n updated_org = org.delete_contact()\n\n dictionary = None\n dictionary = updated_org.as_dict()\n assert len(dictionary['contacts']) == 0\n\n delete_contact_link = ContactLinkModel.find_by_entity_id(entity.identifier)\n assert delete_contact_link\n\n exist_contact_link = ContactLinkModel.find_by_org_id(org_id)\n assert not exist_contact_link",
"def test_delete_contact_no_org(session, auth_mock): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n updated_org = org.delete_contact()\n\n with pytest.raises(BusinessException) as exception:\n updated_org.delete_contact()\n\n assert exception.value.code == Error.DATA_NOT_FOUND.name",
"def del_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db.pop(contact.get_hash_name())\n write_db(db)\n sys.exit(logger.ok('success: contact ' + '\"%s\"' % contact.get_name() + ' deleted'))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))",
"def test_save_contact(self):\n self.new_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 1)",
"def test_save_contact(self):\n # .save_contact() is the save to contact function.\n # Test would check if an addition has been made to our contact list\n self.new_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 1)",
"def delete_contact(self):\n delete_first_name = input(\"Enter first name that you want to delete\\n\")\n for contact in self.contact_list:\n if contact.first_name == delete_first_name:\n #print(str(contact))\n self.contact_list.remove(contact)\n else:\n print(f\"No contact is present with first name {delete_first_name} \")",
"def delete_contact_in_db(self):\n self.init_db(self._testing)\n\n # making sure that the object is in the db\n assert not self.uid == \"\"\n\n self._delete_row_in_db(Contact.table_name, (self.uid,))",
"def RemoveContact(self, contact):\n\t\tself.client.Delete(contact)",
"def test_delete(self):\n person = Person('test_person_b')\n person.delete()\n with database() as db:\n results = db.query(\"SELECT * FROM persons WHERE person_name = 'test_person_b'\")\n self.assertEqual(results, [])",
"def test_add_contacts(self):\n response = self.contacts.add(\"alex\", \"0708913841\")\n self.assertEqual(response, \"Successfully added contacts\" )",
"def tearDown(self):\n Contact.contact_list = []",
"def test_get_contacts(self):\n pass",
"def test_client_address_delete(self):\n pass",
"async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"delete from contacts where id=%s\"\"\"\n await dbcon.operation(q, (contact_id,))",
"def delete_contact(self, contact):\n self._delete('contacts', self._build_params(uuid=contact))",
"def test_get_contact(self):\n pass",
"def test_delete(self):\n pass",
"def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)",
"def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()",
"def test_delete_record(self):\n pass",
"def test_contact_exists(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n contact_exists = Contact.contact_exist(254711223344)\n self.assertTrue(contact_exists)",
"def test_delete_case(self):\n pass",
"def test_delete_email_address(self):\n email_addr = 'delete@' + self.email_dom\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n self.assertTrue(addr.delete(email_addr))",
"def test_delete_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"Facebook\",\"Chris\",\"[email protected]\",\"chris1\") # new credential\n test_credential.save_credential()\n self.new_credential.delete_credential() # Deleting a credential object\n self.assertEqual(len(Credential.credential_list),1)",
"def delcontact(id):\n delid = str(id)\n\n try:\n r.srem(\"contacts\", delid, 1)\n\n r.delete(\"uid:\" + delid + \":name\")\n r.delete(\"uid:\" + delid + \":address\")\n r.delete(\"uid:\" + delid + \":phone\")\n r.delete(\"uid:\" + delid + \":email\")\n\n return {}\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def delete_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='delete'))",
"def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)"
] | [
"0.87605715",
"0.8085114",
"0.766524",
"0.73715454",
"0.72436273",
"0.7203849",
"0.7119705",
"0.6994788",
"0.69293314",
"0.6898616",
"0.68878806",
"0.6867588",
"0.6844377",
"0.6803209",
"0.678051",
"0.6768761",
"0.67519003",
"0.67358315",
"0.67193764",
"0.66907907",
"0.6689076",
"0.66777784",
"0.6655382",
"0.6652161",
"0.6651486",
"0.66510886",
"0.66415334",
"0.6639034",
"0.6635869",
"0.66110444"
] | 0.88033223 | 0 |
Test to check if we can return a Boolean if we cannot find the contact. | def test_contact_exists(self):
self.new_contact.save_contact()
# Test user
test_contact = Contact("Test", "user", "0722334455", "[email protected]")
# We save
test_contact.save_contact()
# variable that stores what we expect
contact_exists = Contact.contact_exist("0722334455")
# The test that should return a variable
self.assertTrue(contact_exists) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_is_workshop_contact(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n return profile in obj.contacts.all()",
"def found(raise_error: bool = False) -> bool:\n raise NotImplementedError",
"def test_contact_exists(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n contact_exists = Contact.contact_exist(254711223344)\n self.assertTrue(contact_exists)",
"def has_error(self):\n return self.error_found",
"def include_contact(self, contact_num: int):\n if self._unique_contacts is not None:\n return contact_num in self._unique_contacts\n else:\n return True",
"def is_found(self) -> bool:\n return self.found",
"def __contains__(self, contact):\n if contact.getId() in self._node_dict.keys():\n return True\n else:\n return False",
"def ref_known_flag(self):\n if CredentialApplication.objects.filter(\n reference_email__iexact=self.reference_email,\n reference_contact_datetime__isnull=False).exclude(\n reference_email=''):\n return True\n elif LegacyCredential.objects.filter(\n reference_email__iexact=self.reference_email).exclude(\n reference_email=''):\n return True\n else:\n return False",
"def test_get_contact(self):\n pass",
"def _check_error(self):\n\n if self.error_code_test != 0:\n return False\n else:\n return True",
"def search_contact():\n if request.method == 'GET':\n tel = request.args.get('tel')\n contact = io_client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n return jsonify({\"existe\": \"Si\"}), 201\n return jsonify({\"existe\": \"No\"}), 404",
"def unfilled_contact(entry: ContactEntry) -> bool:\n if entry.email is not None:\n if len(entry.email) >= 1:\n if entry.email[0].address is not None:\n return False\n if entry.name is not None:\n if entry.name.given_name is not None:\n return False\n if entry.name.family_name is not None:\n return False\n if entry.organization is not None:\n if entry.organization.name is not None:\n if entry.organization.name.text is not None:\n return False\n if entry.organization.department is not None:\n if entry.organization.department.text is not None:\n return False\n return True",
"def is_valid_retrieval(self, card_index):\n return card_index == 0",
"def is_valid_retrieval(self, card_index):\n return card_index == 0",
"def __bool__(self) -> bool:\n return self.return_code == 0",
"def __bool__(self) -> bool:\n return self.return_code == 0",
"def __bool__(self) -> bool:\n return self.return_code == 0",
"def is_authorized_contact(self, dialersetting, str_contact):\n return common_contact_authorization(dialersetting, str_contact)",
"def IsOk(self):\r\n \r\n return True",
"def have_error(self):\n return (hasattr(self, \"got_error\") and\n self.got_error)",
"def has_object_permission(self, request, view, obj):\n\n try:\n Contact.objects.get(user=request.user)\n\n except Contact.DoesNotExist:\n return False\n\n return True",
"def IsfirstAddContact(self):\n if search_text(contact.get_value('accounts'), isScrollable = 0, searchFlag = TEXT_CONTAINS):\n click_in_list_by_index(0)\n return True\n else:\n return False",
"def exists(self):\n return self.obj is not None",
"def __bool__(self):\n return self.isValid()",
"def check(self, mu):\n return self.find(mu)[0] is not None",
"def has_errors(self) -> bool:",
"def exist(self):",
"def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException",
"async def getContactState(self):\n contact_state = await self.director.getItemVariableValue(\n self.item_id, \"ContactState\"\n )\n return bool(contact_state)",
"def hasError(self) -> bool:\n return self.errorCode is not None and len(self.errorCode) > 0"
] | [
"0.6157852",
"0.6054971",
"0.60262066",
"0.5897116",
"0.58594185",
"0.5804995",
"0.5778651",
"0.57772166",
"0.56551945",
"0.563208",
"0.56082284",
"0.5526532",
"0.5509251",
"0.5509251",
"0.55092347",
"0.55092347",
"0.55092347",
"0.5499786",
"0.5489946",
"0.5465567",
"0.54627526",
"0.5455963",
"0.5454511",
"0.544725",
"0.5413796",
"0.54096246",
"0.54075485",
"0.5406437",
"0.54061776",
"0.5386415"
] | 0.6327621 | 0 |
Test to confirm that we are copying the email address from a found contact | def test_copy_email(self):
self.new_contact.save_contact()
Contact.copy_email("0712345678")
self.assertEqual(self.new_contact.email, pyperclip.paste())
# Below we are simply stating that if the module being tested is running we collect the test methods and execute them. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_copy_email(self):\n self.new_contact.save_contact()\n Contact.copy_email(254719702373)\n\n self.assertEqual(self.new_contact.email, pyperclip.paste())",
"def test_copy_email(self):\n\n\n self.new_credential.save_credential()\n Credential.copy_email(\"Chris\")\n\n self.assertEqual(self.new_credential.email,pyperclip.paste())",
"def test_get_email_address(self):\n email_addr = 'test_get_email_addr' + '@' + self.email_dom\n org = 'o=%s' % (self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.smtp_address: [email_addr]}\n expected_result = [(dn, dn_info)] \n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n result = addr.get(email_addr)['data']\n self.assertEqual(result, expected_result)",
"def test_find_contact(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n found_contact = Contact.find_by_phone(254711223344)\n\n self.assertEqual(found_contact.email, test_contact.email)",
"def test_create_email_address(self):\n email_addr = 'testcreate@' + self.email_dom\n org = 'o=%s' % (self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.smtp_address: [email_addr]}\n expected_result = [(dn, dn_info)] \n addr = SpokeEmailAddress(self.org_name, self.user_id)\n result = addr.create(email_addr)['data']\n self.assertEqual(result, expected_result)",
"def test_get_all_email_address(self):\n email_addr = 'test_get_email_addr' + '@' + self.email_dom\n email_addr2 = 'test_get_all_email_addr' + '@' + self.email_dom\n org = 'o=%s' % (self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.smtp_address: [email_addr, email_addr2]}\n expected_result = [(dn, dn_info)] \n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n addr.create(email_addr2)\n result = addr.get()['data']\n self.assertEqual(result, expected_result)",
"def test_get_contact(self):\n pass",
"def test_email_address(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_address'\n )\n self.assertEqual(u'[email protected]', key)",
"def test_get_email_account(self):\n email_addr = self.user_id + '@' + self.email_dom\n org = '%s=%s' % (self.org_attr, self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.imap_enable: ['TRUE'],\n self.imap_mailbox: [self.user_id],\n self.imap_domain: [self.email_dom],\n self.imap_partition: [self.imap_partition_def],\n self.smtp_destination: [email_addr],\n self.smtp_enable: ['TRUE'],\n self.smtp_pri_address: [email_addr]\n }\n expected_result = [(dn, dn_info)] \n acc = SpokeEmailAccount(self.org_name, self.user_id)\n result = acc.get(self.email_addr)['data']\n self.assertEqual(result, expected_result)",
"def test_address__EMailAddress__1():\n zope.interface.verify.verifyObject(IEMailAddress, EMailAddress())",
"def test_duplicate_email(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.assertIn(b'Sorry email already exist', rv.data)",
"def test_contact_exists(self):\n\n self.new_contact.save_contact()\n # Test user\n test_contact = Contact(\"Test\", \"user\", \"0722334455\", \"[email protected]\")\n # We save\n test_contact.save_contact()\n # variable that stores what we expect\n contact_exists = Contact.contact_exist(\"0722334455\")\n # The test that should return a variable\n self.assertTrue(contact_exists)",
"def testGetAddresses3(self):\n self.shop.setMailFromAddress(\"[email protected]\")\n \n sender = self.addresses.getSender()\n self.assertEqual(sender, \"Site Administrator <[email protected]>\")\n \n # Just sender is set, hence receiver is same as sender\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Site Administrator <[email protected]>\",))\n \n # Name and address is set\n self.shop.setMailFromName(\"John Doe\")\n \n sender = self.addresses.getSender()\n self.assertEqual(sender, \"John Doe <[email protected]>\")\n\n # Just sender is set, hence receiver is same as sender\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"John Doe <[email protected]>\",))\n\n # Receivers set\n self.shop.setMailTo([\"Jane Doe <[email protected]>\"])\n\n sender = self.addresses.getSender()\n self.assertEqual(sender, \"John Doe <[email protected]>\")\n \n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Jane Doe <[email protected]>\",))\n\n # More receivers set\n self.shop.setMailTo([\"Jane Doe <[email protected]>\", \"[email protected]\"])\n\n receivers = self.addresses.getReceivers()\n self.assertEqual(receivers, (\"Jane Doe <[email protected]>\", \"[email protected]\"))",
"def test_copy_details(self):\n self.tc_id = \"Ts_002\"\n self.tc_desc = \"Verify the user is able to copy the location type details\"\n self.tc_step = \"TC Start\"\n\n registration = RegistrationPage(self.driver)\n\n self.tc_step = \"Launch the url\"\n registration.launchUrl(self.url)\n\n self.tc_step = \"Enter the basic registration details\"\n registration.clickVendorLogin()\n registration.clickRegisterButton()\n registration.enterBasicRegistrationDetails(self.firstname, self.lastname, self.vendorname, self.fnsnumber,\n self.primaryemail, self.primaryphno, self.psw, self.cpsw,\n self.continfo)\n registration.basicRegButton()\n self.tc_step = \"Enter the public registration details\"\n registration.enterPublicRegistrationDetails(self.publicemail, self.publicphno, self.publicwebsite,\n self.businessdesc, self.products)\n registration.publicRegButton()\n self.tc_step = \"Enter the location details\"\n registration.clickCSAButton()\n registration.clickLocationYesButton()\n registration.csalocationTypeDetails(self.pickupsitename, self.adrs1, self.adrs2, self.zipcode, self.spzl_instruction)\n registration.chooseCity()\n registration.csacopy()\n registration.addmore()\n registration.remove()\n registration.confirmremove()\n registration.saveonlyCsa()\n self.assertEqual(registration.verifyRegistration(),\"Healthy Incentives Program (HIP)\",\"Login Success\")",
"def test_copy_name(self):\n subject_copy = copy_subject(self.subject, self.DATA_MODEL)\n self.assertEqual(\"Subject (copy)\", subject_copy[\"name\"])",
"def test_create_email_account(self):\n first = 'create_email'\n last = 'account_test'\n user_id = first + last\n email_addr = first + last + '@' + self.email_dom\n user = SpokeUser(self.org_name)\n user.create(email_addr, first, last)\n \n org = '%s=%s' % (self.org_attr, self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {'objectClass': ['top', 'inetOrgPerson', self.user_class,\n self.imap_class, self.smtp_class],\n self.imap_enable: ['TRUE'],\n self.imap_mailbox: [user_id],\n self.imap_domain: [self.email_dom],\n self.imap_partition: [self.imap_partition_def],\n self.smtp_destination: [email_addr],\n self.smtp_enable: ['TRUE'],\n self.smtp_pri_address: [email_addr]\n }\n expected_result = [(dn, dn_info)] \n acc = SpokeEmailAccount(self.org_name, user_id)\n result = acc.create(email_addr)['data']\n self.assertEqual(result, expected_result)\n user.delete(first, last)",
"def test_create_contact(self):\n \n url = reverse('contact-list')\n contact = self.get_dummy_contact()\n\n response = self.client.post(url, contact,\n format='json',\n HTTP_AUTHORIZATION=self.get_auth())\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Contact.objects.count(), 1)\n self.assertEqual(Contact.objects.get().email_address, contact['email_address'])",
"def test_get_missing_email_address(self):\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n email_addr = 'missing@' + self.email_dom\n result = addr.get(email_addr)['data']\n expected_result = []\n self.assertEqual(result, expected_result)",
"def test_good_email():\n good_email = \"[email protected]\"\n m = CannedRe.EMAIL.match(good_email)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx email test failed for %s\" % good_email\n assert m.string == good_email",
"def test_compose_email_good(self): \n pass",
"def test_invalid_email_account_input(self):\n acc = SpokeEmailAccount(self.org_name, self.user_id)\n email_addr = '*@domain.loc'\n self.assertRaises(error.InputError, acc.get, email_addr)",
"def test_contact_exists(self):\n self.new_contact.save_contact()\n test_contact = Contact(\"Test\", \"User\", 254711223344, \"[email protected]\")\n test_contact.save_contact()\n contact_exists = Contact.contact_exist(254711223344)\n self.assertTrue(contact_exists)",
"def test_compose_email_somebad(self):\n pass",
"def test_find_by_number(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0748363839\", \"[email protected]\")\n test_contact.save_contact()\n # The number that we find in found_contact should be the same as the one in test_contact for the test to pass.\n # If they aren't the same...the test will always fail\n found_contact = Contact.find_by_number(\"0748363839\")\n # The test\n self.assertEqual(found_contact.email, test_contact.email)",
"def test_copy_details(self):\n self.new_details.save_details()\n twitter = Details('Dennis', 'Facebook', 'Kiplangat', 'kiplangat18')\n twitter.save_details()\n find_details = None\n for details in Details.user_details_list:\n find_details = Details.find_by_site_name(details.site_name)\n return pyperclip.copy(find_details.password)\n\n Details.copy_details(self.new_details.site_name)\n self.assertEqual('kiplangat18', pyperclip.paste())\n print(pyperclip.paste())",
"def test_clean_email(self):\n\n raw_email = 'from=<[email protected]>'\n result = clean_email(raw_email)\n self.assertEqual(result, '[email protected]')",
"def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']",
"def test_invalid_email_address_input(self):\n email_addr = '*@domain.loc'\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n self.assertRaises(error.InputError, addr.get, email_addr)",
"def test_copyUIDs(self):\n d = self.client.copy(\"2:3\", \"MEETING\", uid=True)\n\n self.assertEqual(\n self.transport.value(),\n b\"0001 UID COPY 2:3 MEETING\\r\\n\",\n )\n\n self.client.lineReceived(b\"0001 OK COPY completed\")\n self.assertEqual(self.successResultOf(d),\n ([], b'OK COPY completed'))",
"def _test_email_address_failures(self, exception):\r\n # Select number of emails to fit into a single subtask.\r\n num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = int((num_emails + 3) / 4.0)\r\n expected_succeeds = num_emails - expected_fails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # have every fourth email fail due to some address failure:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None, None, None])\r\n self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, failed=expected_fails)"
] | [
"0.83343744",
"0.7601879",
"0.6900183",
"0.6812739",
"0.6561415",
"0.6468561",
"0.6455767",
"0.64429235",
"0.6356866",
"0.6320259",
"0.62970245",
"0.62465024",
"0.6236399",
"0.6219535",
"0.6197941",
"0.6197167",
"0.6179059",
"0.6178218",
"0.61507607",
"0.6110539",
"0.60834587",
"0.6065035",
"0.60463184",
"0.6037452",
"0.6015344",
"0.6014169",
"0.59923685",
"0.595652",
"0.5955858",
"0.595272"
] | 0.7637972 | 1 |
partial_distance_covariance_test(x, y, z, num_resamples=0, exponent=1, random_state=None) Test of partial distance covariance independence. Compute the test of independence based on the partial distance covariance, for two random vectors conditioned on a third. The test is a permutation test where the null hypothesis is that all random vectors have the same distribution. | def partial_distance_covariance_test(x, y, z, **kwargs):
# pylint:disable=too-many-locals
random_state = _random_state_init(kwargs.pop("random_state", None))
# B
num_resamples = kwargs.pop("num_resamples", 0)
_check_kwargs_empty(kwargs)
# Compute U-centered matrices
u_x = _dcor_internals._u_distance_matrix(x)
u_y = _dcor_internals._u_distance_matrix(y)
u_z = _dcor_internals._u_distance_matrix(z)
# Compute projections
proj = _dcor_internals.u_complementary_projection(u_z)
p_xz = proj(u_x)
p_yz = proj(u_y)
num_dimensions = u_x.shape[0]
# epsilon_n
observed_pdcov = num_dimensions * _dcor_internals.u_product(p_xz, p_yz)
# epsilon^(b)_n
bootstrap_pdcov = _np.ones(num_resamples, dtype=observed_pdcov.dtype)
for bootstrap in range(num_resamples):
permuted_index = random_state.permutation(num_dimensions)
permuted_p_xz = p_xz[_np.ix_(permuted_index, permuted_index)]
pdcov = num_dimensions * _dcor_internals.u_product(permuted_p_xz, p_yz)
bootstrap_pdcov[bootstrap] = pdcov
extreme_results = bootstrap_pdcov > observed_pdcov
p_value = (_np.sum(extreme_results) + 1) / (num_resamples + 1)
return _utils.HypothesisTest(
p_value=p_value,
statistic=observed_pdcov
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_coeffvar(self):\n self.assertEqual(coeffvar(list1, sample=False), np.std(list1) /\n np.mean(list1))\n self.assertEqual(coeffvar(list1), np.std(list1, ddof=1) /\n np.mean(list1))",
"def test_exact_two_qubit_cnot_decompose_random(self, seed):\n unitary = random_unitary(4, seed=seed)\n self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)",
"def partial_corr(data=None, x=None, y=None, z=None, method=\"pearson\"):\n\n assert data.shape[0] > 2 # Check for atleast 3 samples\n assert x != z # x and z should be distinct\n assert y != z # y and z should be distinct\n assert x != y # x and y should be distinct\n if isinstance(z, list):\n assert x not in z # x and z should be distinct\n assert y not in z # y and z should be distinct\n\n combined_variables = [x, y] # Combine all variables - x, y and z\n for var in z:\n combined_variables.append(var)\n data = data[combined_variables].dropna() # Drop missing values\n n = data.shape[0] # Number of samples after dropping missing values\n k = data.shape[1] - 2 # Number of covariates\n assert n > 2\n\n if method == \"spearman\":\n V = data.rank(na_option=\"keep\").cov() # Change data to rank for spearman correlation\n else:\n V = data.astype(float).cov() # Computing Covariance Matrix\n Vi = np.linalg.pinv(V, hermitian=True) # Computing Inverse Covariance Matrix\n Vi_diag = Vi.diagonal() # Storing variance\n D = np.diag(np.sqrt(1 / Vi_diag)) # Storing Standard Deviations from diagonal of inverse covariance matrix\n pcor = -1 * (D @ Vi @ D)\n r = pcor[0, 1]\n\n if np.isnan(r):\n return {\"n\": n, \"r\": np.nan, \"CI95%\": np.nan, \"p-val\": np.nan}\n\n # Finding p-value using student T test\n dof = n - k - 2 # Degree of freedom for multivariate analysis\n tval = r * np.sqrt(dof / (1 - r**2)) # Test statistic\n pval = 2 * t.sf(np.abs(tval), dof) # Calculate p-value corresponding to the test statistic and degree of freedom\n\n ci = compute_ci(r=r, nx=(n - k), ny=(n - k)) # Finding Confidence Interval\n ci = np.round(ci, 3)\n stats = {\n \"n\": n,\n \"r\": r,\n \"CI95%\": [ci],\n \"p-val\": pval.round(5),\n }\n return stats",
"def test_exact_supercontrolled_decompose_phase_3_use_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n\n tgt_a, tgt_b = state.random(size=2) * np.pi / 4\n tgt_c = state.random() * np.pi / 2 - np.pi / 4\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(tgt_a, tgt_b, tgt_c) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=3)",
"def PartialPearsonCorrelation(xdata, ydata, zdata):\n\ttry:\n\t\t(rxy, dummy, n) = PearsonCorrelation(xdata, ydata)\n\t\t(ryz, dummy, n) = PearsonCorrelation(ydata, zdata)\n\t\t(rxz, dummy, n) = PearsonCorrelation(xdata, zdata)\n\t\tr = (rxy - ryz*rxz)/math.sqrt((1-ryz**2)*(1-rxz**2))\n\texcept ZeroDivisionError:\n\t\traise StatsError(\"Standard deviation is zero.\")\n\tif not (-1.0000000001 <= r <= 1.000000001):\n\t\traise StatsError(\"Invalid correlation coefficient of %r.\" % r)\n\tt = r*math.sqrt((n-3)/(1-r*r))\n\tz = t\n\tp = Prob_Z(z)\n\tif not (0.0 <= p <= 1.0):\n\t\traise StatsError(\"Invalid P-value of %r.\" % r)\n\treturn (r, p, n)",
"def test(x, y, z=None, num_perm=10, prop_test=.1,\n max_time=60, discrete=(False, False),\n plot_return=False, test_type='min',\n verbose=False, fixed_arch=False, bootstrap_type='mindiv', **kwargs):\n # If x xor y is discrete, use the continuous variable as input.\n if discrete[0] and not discrete[1]:\n x, y = y, x\n # Otherwise, predict the variable with fewer dimensions.\n elif x.shape[1] < y.shape[1]:\n x, y = y, x\n\n # Adjust the dimensionalities of x, y, z to be on the same\n # order, by simple data duplication.\n x, y, z = equalize_dimensions(x, y, z)\n\n # Use this many datapoints as a test set.\n n_samples = x.shape[0]\n n_test = int(n_samples * prop_test)\n\n # Attach the conditioning variable to the input.\n x_z = np.hstack([x, z])\n\n # Set up storage.\n d0_preds = []\n d1_preds = []\n d0_stats = np.zeros(num_perm)\n d1_stats = np.zeros(num_perm)\n\n kwargs['epochs'] = 1000\n kwargs['lr'] = 1e-2\n kwargs['nn_verbose'] = True\n kwargs['batch_size'] = 128\n kwargs['ntype'] = 'plain'\n\n # Construct the neural net.\n if fixed_arch:\n clf = nn.NN(x_dim=x_z.shape[1], y_dim=y.shape[1],\n arch=[128]*2, ntype='plain')\n\n for perm_id in range(num_perm):\n # Create the d0 (reshuffled-x) dataset.\n perm_ids = np.random.permutation(n_samples)\n x_z_bootstrap = np.hstack([x[perm_ids], z])\n\n # Sample NN training params.\n if not fixed_arch:\n kwargs['arch'] = [32] * (perm_id + 1)\n clf = nn.NN(x_dim=x_z.shape[1], y_dim=y.shape[1], **kwargs)\n print(('lr={lr:.2}, bs={batch_size}, '\n 'arch={arch}, ntype={ntype}').format(**kwargs))\n\n with tf.Session() as sess:\n # Train on the reshuffled data.\n sess.run(tf.global_variables_initializer())\n clf.saver.save(sess, './init_nn_save')\n clf.fit(x_z_bootstrap[n_test:], y[n_test:], sess=sess, **kwargs)\n y_pred0 = clf.predict(x_z_bootstrap[:n_test], sess=sess)\n\n # Train on the original data.\n sess.run(tf.global_variables_initializer())\n clf.saver.restore(sess, './init_nn_save')\n clf.fit(x_z[n_test:], y[n_test:], sess=sess, **kwargs)\n y_pred1 = clf.predict(x_z[:n_test], sess=sess)\n\n d0_preds.append(y_pred0)\n d0_stats[perm_id] = mse(y_pred0, y[:n_test])\n d1_preds.append(y_pred1)\n d1_stats[perm_id] = mse(y_pred1, y[:n_test])\n\n if verbose:\n print('D0 statistic, iter {}: {}'.format(\n perm_id, d0_stats[perm_id]))\n print('D1 statistic, iter {}: {}'.format(\n perm_id, d1_stats[perm_id]))\n\n print('Resetting Tensorflow graph...')\n tf.reset_default_graph()\n \n # Compute the p-value.\n p_value = globals()['bootstrap_' + bootstrap_type](d0_stats, d1_stats)\n\n if plot_return:\n return (p_value, d0_stats, d1_stats)\n else:\n return p_value",
"def _cp3(X, n_components, tol, max_iter, init_type, random_state=None):\n\n if len(X.shape) != 3:\n raise ValueError(\"CP3 decomposition only supports 3 dimensions!\")\n\n if init_type == \"random\":\n A, B, C = _random_init(X, n_components, random_state)\n elif init_type == \"hosvd\":\n A, B, C = _hosvd_init(X, n_components)\n grams = [np.dot(arr.T, arr) for arr in (A, B, C)]\n err = 1E10\n\n for itr in range(max_iter):\n err_old = err\n A = matricize(X, 0).dot(kr(C, B)).dot(linalg.pinv(grams[1] * grams[2]))\n if itr == 0:\n normalization = np.sqrt((A ** 2).sum(axis=0))\n else:\n normalization = A.max(axis=0)\n normalization[normalization < 1] = 1\n A /= normalization\n grams[0] = np.dot(A.T, A)\n\n B = matricize(X, 1).dot(kr(C, A)).dot(linalg.pinv(grams[0] * grams[2]))\n if itr == 0:\n normalization = np.sqrt((B ** 2).sum(axis=0))\n else:\n normalization = B.max(axis=0)\n normalization[normalization < 1] = 1\n B /= normalization\n grams[1] = np.dot(B.T, B)\n\n C = matricize(X, 2).dot(kr(B, A)).dot(linalg.pinv(grams[0] * grams[1]))\n if itr == 0:\n normalization = np.sqrt((C ** 2).sum(axis=0))\n else:\n normalization = C.max(axis=0)\n normalization[normalization < 1] = 1\n C /= normalization\n grams[2] = np.dot(C.T, C)\n\n err = linalg.norm(matricize(X, 0) - np.dot(A, kr(C, B).T)) ** 2\n thresh = np.abs(err - err_old) / err_old\n if thresh < tol:\n break\n\n return A, B, C",
"def test_calculate_variance_covariance(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.6142)\n self.assertAlmostEqual(_var_covar[0][0], 0.1351777)\n self.assertAlmostEqual(_var_covar[0][1], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][0], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][1], 0.01710296)\n self.assertEqual(_var_covar[0][1], _var_covar[1][0])",
"def test_partial_sum_2d(self): # pylint:disable=no-self-use\n x = [1, 2, 3]\n y = [4, 5, 6]\n c = [7, 8, 9]\n\n gamma = dcor_internals._partial_sum_2d(x, y, c)\n expected_gamma = [17., 16., 15.]\n\n np.testing.assert_allclose(gamma, expected_gamma)",
"def test_cov_q(self, ndlys=13):\n for d in self.d:\n d.flag_array[:] = False #ensure that there are no flags!\n d.select(times=np.unique(d.time_array)[:10], frequencies=d.freq_array[:16])\n for d_std in self.d_std:\n d_std.flag_array[:] = False\n d_std.select(times=np.unique(d_std.time_array)[:10], frequencies=d_std.freq_array[:16])\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)\n Ntime = self.ds.Ntimes\n self.ds.set_Ndlys(ndlys)\n # Here is the analytic covariance matrix...\n chan_x, chan_y = np.meshgrid(range(self.ds.Nfreqs), range(self.ds.Nfreqs))\n cov_analytic = np.zeros((self.ds.spw_Ndlys, self.ds.spw_Ndlys), dtype=np.complex128)\n for alpha in range(self.ds.spw_Ndlys):\n for beta in range(self.ds.spw_Ndlys):\n cov_analytic[alpha, beta] = np.exp(-2j*np.pi*(alpha-beta)*(chan_x-chan_y)/self.ds.spw_Ndlys).sum()\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n #print(cov_analytic)\n\n for input_data_weight in ['identity','iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n #check error raised\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n for taper in taper_selection:\n qc = self.ds.cov_q_hat(key1,key2,model='dsets')\n self.assertTrue(np.allclose(np.array(list(qc.shape)),\n np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))\n qc = self.ds.cov_q_hat(key1,key2,model='empirical')\n self.assertTrue(np.allclose(np.array(list(qc.shape)),\n np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))\n\n \"\"\"\n Now test that analytic Error calculation gives Nchan^2\n \"\"\"\n self.ds.set_weighting('identity')\n qc = self.ds.cov_q_hat(key1, key2, model='dsets')\n self.assertTrue(np.allclose(qc,\n np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))\n \"\"\"\n Test lists of keys\n \"\"\"\n self.ds.set_weighting('identity')\n qc=self.ds.cov_q_hat([key1], [key2], time_indices=[0], model='dsets')\n self.assertTrue(np.allclose(qc,\n np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))\n self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=200)\n self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=\"watch out!\")",
"def test_sx_virtz_3cnot_optimal(self, seed):\n unitary = random_unitary(4, seed=seed)\n decomposer = TwoQubitBasisDecomposer(CXGate(), euler_basis=\"ZSX\", pulse_optimize=True)\n circ = decomposer(unitary)\n self.assertEqual(Operator(unitary), Operator(circ))\n self.assertEqual(self._remove_pre_post_1q(circ).count_ops().get(\"sx\"), 2)",
"def cross_covariance(y, z):\n return CrossCovariance()(y, z)",
"def test_quadratic_features_random_state_invariance():\n transformer1 = QuadraticFeatures(random_state=0)\n transformer2 = QuadraticFeatures(random_state=0)\n X1 = transformer1.fit_transform(X_small)\n X2 = transformer2.fit_transform(X_small)\n assert np.all(X1 == X2)",
"def test_spheroid_convexity(spheroid_convex_fixture):\n assert(spheroid_convex_fixture.convex_p() == pytest.approx(1.0))\n assert(spheroid_convex_fixture.linear_p() == pytest.approx(0.0))",
"def spatial_covariance(distances, z, eval_distances, tolerance=0.2):\n if distances[np.triu_indices(distances.shape[0])].max() > 1000:\n sub_distances = distances\n else:\n sub_distances = np.array(distances, copy=True)\n sub_distances[np.triu_indices(sub_distances.shape[0])] = 999999\n covariances = np.zeros(eval_distances.size)\n z_flat = z.ravel()\n for d, eval_distance in enumerate(eval_distances):\n points_a, points_b = np.where(np.abs(sub_distances - eval_distance) <= tolerance)\n covariances[d] = np.sum((z_flat[points_a] - z_flat[points_a].mean()) *\n (z_flat[points_b] - z_flat[points_b].mean())) / (float(points_a.size) - 1.0)\n covariances[d] /= z_flat[points_a].std() * z_flat[points_b].std()\n return covariances",
"def test_interpolative_decomposition(self):\n self.assertTrue(np.all(self._P <= 2)) # Validate entries of P are between -1 and 2.\n self.assertTrue(np.all(self._P >= -2))\n # Validate P's norm is bound by the theoretical bound\n self.assertLessEqual(np.linalg.norm(self._P), np.sqrt(self._k * (self._n - self._k) + 1))\n self.assertGreaterEqual(svdvals(self._P)[-1], 1) # Validate the least singular value of P is at least 1.\n\n for unit_vector in np.eye(self._k): # Validate P has kxk identity matrix as a sub-matrix.\n self.assertIn(unit_vector, self._P.T)\n\n for col in self._B.T: # Validate every column of B is also a column of A.\n self.assertIn(col, self._A.T)",
"def zca(x, xtest, bias=0.1):\n covariance = np.dot(x.T, x) / x.shape[0]\n covariance += bias * np.eye(x.shape[1])\n U, S, _ = np.linalg.svd(covariance)\n pc = U @ np.diag(1. / np.sqrt(S)) @ U.T\n X = x @ pc\n Xtest = xtest @ pc\n return X, Xtest",
"def numerical_covariance(self, params={}, nrealisations=200, nthreads=1):\n\n if nrealisations < 2:\n raise ValueError(\"nrealisations must be more than one\")\n\n # We use a hack where we define an external function which *passed*\n # this object just so that we can do multiprocessing on it.\n fnc = partial(_produce_mock, self, params)\n\n pool = MyPool(nthreads)\n \n power = pool.map(fnc, np.arange(int(nrealisations/2)))\n power2 = pool.map(fnc, np.arange(int(nrealisations/2)))\n power.extend(power2)\n \n # Note, this covariance *already* has thermal noise built in.\n cov = []\n mean = []\n \n for ii in range(self.n_obs):\n mean.append(np.mean(np.array(power)[:,ii,:,:], axis=0))\n\n if self.ps_dim == 2:\n cov.append([np.cov(x) for x in np.array(power)[:,ii,:,:].transpose((1, 2, 0))])\n else:\n cov = np.var(np.array(power)[:,ii,:,:], axis=0)\n\n #Cleanup the memory\n for i in range(len(power)-1,-1,-1):\n del power[i] \n \n pool.close()\n pool.join()\n\n return mean, cov",
"def cd(x0,partial,learning_rate=0.01,iterations=10,**kwargs):\n x = x0; N = len(x)\n for i in range(iterations):\n indices = random.sample(list(range(N)),N)\n for j in indices:\n x[j] -= learning_rate*partial(x0,j)\n return x",
"def covariance(data=None, left=None, right=None, finite_sample_correction=True, **kwargs):\n return Component(\n \"Covariance\",\n arguments={\n 'data': Component.of(data),\n 'left': Component.of(left),\n 'right': Component.of(right)\n },\n options={\n 'finite_sample_correction': finite_sample_correction\n },\n constraints=kwargs)",
"def test_exact_supercontrolled_decompose_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n self.check_exact_decomposition(random_unitary(4, seed=state).data, decomposer)",
"def crosscorr(x, y, **kwargs):\r\n # just make the same computation as the crosscovariance,\r\n # but without subtracting the mean\r\n kwargs['debias'] = False\r\n rxy = crosscov(x, y, **kwargs)\r\n return rxy",
"def test_param_cov(self, fitter):\n fitter = fitter()\n\n a = 2\n b = 100\n\n with NumpyRNGContext(_RANDOM_SEED):\n x = np.linspace(0, 1, 100)\n # y scatter is amplitude ~1 to make sure covariance is\n # non-negligible\n y = x * a + b + np.random.randn(len(x))\n\n # first compute the ordinary least squares covariance matrix\n X = np.vstack([x, np.ones(len(x))]).T\n beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)\n s2 = np.sum((y - np.matmul(X, beta).ravel()) ** 2) / (len(y) - len(beta))\n olscov = np.linalg.inv(np.matmul(X.T, X)) * s2\n\n # now do the non-linear least squares fit\n mod = models.Linear1D(a, b)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n fmod = fitter(mod, x, y)\n\n assert_allclose(fmod.parameters, beta.ravel())\n assert_allclose(olscov, fitter.fit_info[\"param_cov\"])",
"def test_approx_supercontrolled_decompose_phase_3_use_random(self, seed, delta=0.01):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = state.random() * np.pi / 4\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary), basis_fidelity=0.99)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n tgt_a, tgt_b, tgt_c = 0.5, 0.4, 0.3\n d1, d2, d3 = state.random(size=3) * delta\n tgt_unitary = (\n np.exp(1j * tgt_phase) * tgt_k1 @ Ud(tgt_a + d1, tgt_b + d2, tgt_c + d3) @ tgt_k2\n )\n self.check_approx_decomposition(tgt_unitary, decomposer, num_basis_uses=3)",
"def check_sample_correctishness_bc01(f):\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n 
\"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"",
"def pcov(xdata, ydata, mx=None, my=None):\n n, s = _SP(xdata, mx, ydata, my)\n if n > 0:\n return s/n\n else:\n raise ValueError('population covariance requires at least one point')",
"def cond_indep_fisher_z(data, var1, var2, cond=[], alpha=0.05):\n\n N, k_var = np.shape(data)\n list_new = [var1, var2] + list(cond)\n data_array = np.array(data)\n array_new = np.transpose(np.matrix(data_array[:, list_new]))\n cov_array = np.cov(array_new)\n size_c = len(list_new)\n X1 = 0\n Y1 = 1\n S1 = [i for i in range(size_c) if i != 0 and i != 1]\n r = partial_corr_coef(cov_array, X1, Y1, S1)\n z = 0.5 * np.log((1+r) / (1-r))\n z0 = 0\n W = np.sqrt(N - len(S1) - 3) * (z - z0)\n cutoff = norm.ppf(1 - 0.5 * alpha)\n if abs(W) < cutoff:\n CI = 1\n else:\n CI = 0\n p = norm.cdf(W)\n r = abs(r)\n\n return CI, r, p",
"def test__get_covariance(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n expected_covariance = np.array([\n [1., -0.01261819, -0.19821644],\n [-0.01261819, 1., -0.16896087],\n [-0.19821644, -0.16896087, 1.]\n ])\n\n # Run\n covariance = copula._get_covariance(self.data)\n\n # Check\n assert np.isclose(covariance, expected_covariance).all().all()",
"def test_seed_289(self):\n unitary = random_unitary(4, seed=289)\n self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)",
"def test_single_variable_fourier_coeffs(self, freq_dict, expected_coeffs):\n degree = max(freq_dict.keys())\n partial_func = partial(fourier_function, freq_dict)\n # Testing with a single degree provided as integer\n coeffs = coefficients(partial_func, 1, degree)\n\n assert np.allclose(coeffs, expected_coeffs)\n # Testing with a single-entry sequence of degrees\n coeffs = coefficients(partial_func, 1, (degree,))\n\n assert np.allclose(coeffs, expected_coeffs)"
] | [
"0.5698384",
"0.53769475",
"0.5372643",
"0.52613395",
"0.51615024",
"0.5145563",
"0.50757515",
"0.50168383",
"0.4970485",
"0.4894757",
"0.484348",
"0.48419997",
"0.48381978",
"0.48207268",
"0.48052663",
"0.47089332",
"0.470408",
"0.46567678",
"0.463776",
"0.46216598",
"0.46030724",
"0.457652",
"0.45661092",
"0.45576346",
"0.45399594",
"0.45271486",
"0.45270804",
"0.45196047",
"0.45047083",
"0.4498597"
] | 0.8479564 | 0 |
Creates an SSL keyfile and returns the path. | def CreateKeyFile():
keyfile = tempfile.mkstemp()[1]
cmd = [
'openssl',
'genrsa',
'-out', keyfile,
'2048'
]
_RunCommand(cmd)
return keyfile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile",
"def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)",
"def CreateCsrFile(keyfile):\n csrfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'req',\n '-new',\n '-key', keyfile,\n '-out', csrfile,\n '-subj', '/C=NA/ST=NA/L=NA/O=Chromium/OU=Test/CN=chromium.org'\n ]\n _RunCommand(cmd)\n return csrfile",
"def create_file(self, key=None):\n self.make_directory()\n open(self.file_path(key), 'w').close()",
"def create_user_key_file(username: str):\n\n user: User = UserModel().get_user(username=username)\n user_key: Key = user.public_key\n\n public_key: bytes = user_key.public_key\n\n if not os.path.exists(\"./ssh_ca\"):\n os.mkdir(\"./ssh_ca\")\n\n with open(f\"./ssh_ca/{username}.pub\") as public_key_file:\n public_key_file.write(public_key.decode())",
"def _get_key_path(self, key_name, serial):\n return '%s%s/%d_%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, serial,\n key_name)",
"def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):\r\n if key_name == '-':\r\n return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)\r\n else:\r\n dir_name = os.path.dirname(key_name)\r\n if dir_name and not os.path.exists(dir_name):\r\n os.makedirs(dir_name)\r\n fp = open(key_name, 'wb')\r\n return Key(self.name, key_name, fp)",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response",
"def CreateCrtFile(keyfile, csrfile):\n crtfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'x509',\n '-req',\n '-days', '1',\n '-in', csrfile,\n '-signkey', keyfile,\n '-out', crtfile\n ]\n _RunCommand(cmd)\n return crtfile",
"def CreatePemFile():\n keyfile = CreateKeyFile()\n csrfile = CreateCsrFile(keyfile)\n crtfile = CreateCrtFile(keyfile, csrfile)\n pemfile = tempfile.mkstemp()[1]\n with open(keyfile) as k:\n with open(crtfile) as c:\n with open(pemfile, 'wb') as p:\n p.write('%s\\n%s' % (k.read(), c.read()))\n return pemfile",
"def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)",
"def init_key(key_size, key_dir):\n try:\n key_pem = crypto_util.make_key(key_size)\n except ValueError as err:\n logging.fatal(str(err))\n sys.exit(1)\n\n # Save file\n le_util.make_or_verify_dir(key_dir, 0o700)\n key_f, key_filename = le_util.unique_file(\n os.path.join(key_dir, \"key-letsencrypt.pem\"), 0o600)\n key_f.write(key_pem)\n key_f.close()\n\n logging.info(\"Generating key (%d bits): %s\", key_size, key_filename)\n\n return le_util.Key(key_filename, key_pem)",
"def _get_path_to_key_file():\n\n if 'private_key_path' not in ctx.node.properties:\n raise NonRecoverableError(\n 'Unable to get key file path, private_key_path not set.')\n\n return os.path.expanduser(ctx.node.properties['private_key_path'])",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)",
"def generate_ssl_object(module, ssl_cafile, ssl_certfile, ssl_keyfile,\n ssl_crlfile=None):\n\n ssl_files = {\n 'cafile': {'path': ssl_cafile, 'is_temp': False},\n 'certfile': {'path': ssl_certfile, 'is_temp': False},\n 'keyfile': {'path': ssl_keyfile, 'is_temp': False},\n 'crlfile': {'path': ssl_crlfile, 'is_temp': False}\n }\n\n for key, value in ssl_files.items():\n if value['path'] is not None:\n # TODO is that condition sufficient?\n if value['path'].startswith(\"-----BEGIN\"):\n # value is a content, need to create a tempfile\n fd, path = tempfile.mkstemp(prefix=key)\n with os.fdopen(fd, 'w') as tmp:\n tmp.write(value['path'])\n ssl_files[key]['path'] = path\n ssl_files[key]['is_temp'] = True\n elif not os.path.exists(os.path.dirname(value['path'])):\n # value is not a content, but path does not exist,\n # fails the module\n module.fail_json(\n msg='\\'%s\\' is not a content and provided path does not '\n 'exist, please check your SSL configuration.' % key\n )\n\n return ssl_files",
"def create_pki():\n os.mkdir(pki_dir)\n os.mkdir(f'{pki_dir}/newcerts')\n Path(f'{pki_dir}/index.txt').touch()\n with open(f'{pki_dir}/serial', 'w') as serial_file:\n serial_file.write('00000000')\n serial_file.close()\n create_CA('/CN=My cool CA/O=Honest Achmed/OU=Used Cars/C=EU')",
"def create_server_certs():\n global server_key_files, server_keystore, config\n\n same_enc_sign_cert = config[\"config\"][\"same_enc_sign_cert\"]\n if not Path(server_key_files[\"key\"]).is_file() or not Path(server_key_files[\"crt\"]).is_file():\n print(\"create new encryption cert\\n\")\n create_server_certs_enc()\n for f_item in [\"key\", \"crt\"]:\n with open(server_key_files[f_item], \"w\") as f:\n f.write(server_keystore[f_item])\n f.close()\n else:\n for f_item in [\"key\", \"crt\"]:\n with open(server_key_files[f_item], \"r\") as f:\n server_keystore[f_item] = f.read()\n f.close()\n\n server_keystore[\"key-sign\"] = server_keystore[\"key\"]\n server_keystore[\"crt-sign\"] = server_keystore[\"crt\"]\n\n if not Path(server_key_files[\"key-sign\"]).is_file() or not Path(server_key_files[\"crt-sign\"]).is_file():\n print(\"create new signing cert\\n\")\n if not same_enc_sign_cert:\n create_server_certs_sign()\n for f_item in [\"key-sign\", \"crt-sign\"]:\n with open(server_key_files[f_item], \"w\") as f:\n f.write(server_keystore[f_item])\n f.close()\n else:\n for f_item in [\"key-sign\", \"crt-sign\"]:\n with open(server_key_files[f_item], \"r\") as f:\n server_keystore[f_item] = f.read()\n f.close()",
"def create_cert(self, cert_file, key_file):\n if os.path.isfile(cert_file) and os.path.isfile(key_file):\n return cert_file, key_file\n\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 2048)\n cert = crypto.X509()\n cert.get_subject().C = \"US\"\n cert.get_subject().ST = \"CO\"\n cert.get_subject().L = \"Denver\"\n cert.get_subject().CN = gethostname()\n cert.get_subject().O = \"Metropolitan State University of Denver\"\n cert.get_subject().OU = \"Computer Science\"\n cert.set_serial_number(6)\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(365*24*60*60)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha1')\n\n open(join(cert_file), 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(join(key_file), \"w\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n return cert_file, key_file",
"def create_private_key_temp_file(cls, file_suffix):\n tmp_file = tempfile.NamedTemporaryFile(mode='w+b', suffix=file_suffix)\n f = open(tmp_file.name, \"w+\")\n f.write(DSConfig.private_key())\n f.close()\n return tmp_file",
"def generate_keyfile(csrf_key, session_key):\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n if os.path.exists(file_name):\n if options.force is None:\n print \"Warning: secret_keys.py file exists. Use '-f' flag to force overwrite.\"\n else:\n write_file(output)\n else:\n write_file(output)",
"def generate_key(domain_name):\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n\n #storing client's private key\n with open(domain_name + \".key\", \"wb\") as f:\n f.write(key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n ))\n\n return key",
"def create_key ():",
"def PRIVATE_RSA_KEYFILE_PATH() :\n return os.path.join( config.CONFIG_PATH(), \"%s-private.pem\" % RSA_KEYPAIR_PREFIX() )",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)",
"def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()",
"def tmp_key(filename):\n return TMP_PREFIX + filename",
"def get_synapse_signing_key(self):\n if not path.exists(self.synapse_signing_key_file):\n key_id = \"a_\" + self.random_string(4)\n key_content = generate_signing_key(key_id)\n with open(self.synapse_signing_key_file, \"w+\") as key_file:\n write_signing_keys(key_file, (key_content,))\n return self.synapse_signing_key_file",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)",
"def get_skey_file(addresses_path, address_type, name):\n return get_address_key_file(addresses_path, address_type, 'signing_key', name)",
"def _get_key_link(self, key_name):\n return '%s%s/%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, key_name)"
] | [
"0.6690019",
"0.640222",
"0.63990533",
"0.63100165",
"0.6247914",
"0.6236544",
"0.622373",
"0.61842155",
"0.6159797",
"0.6129716",
"0.6111204",
"0.6094071",
"0.6050029",
"0.60403",
"0.60227233",
"0.6010699",
"0.5991577",
"0.59733444",
"0.59372234",
"0.5916288",
"0.5860859",
"0.5842457",
"0.58174187",
"0.57996196",
"0.575787",
"0.5752311",
"0.57476115",
"0.5743859",
"0.57260114",
"0.5707291"
] | 0.7345631 | 0 |
Creates an SSL CSR file and returns the path. | def CreateCsrFile(keyfile):
  csrfile = tempfile.mkstemp()[1]
  cmd = [
      'openssl',
      'req',
      '-new',
      '-key', keyfile,
      '-out', csrfile,
      '-subj', '/C=NA/ST=NA/L=NA/O=Chromium/OU=Test/CN=chromium.org'
  ]
  _RunCommand(cmd)
  return csrfile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CreateCrtFile(keyfile, csrfile):\n crtfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'x509',\n '-req',\n '-days', '1',\n '-in', csrfile,\n '-signkey', keyfile,\n '-out', crtfile\n ]\n _RunCommand(cmd)\n return crtfile",
"def create_csr(dn):\n tmp_file = f'/tmp/{get_temp_filename()}'\n key_filename = f'{tmp_file}.key'\n csr_filename = f'{tmp_file}.csr'\n cmd = [\n \"openssl\",\n \"req\",\n \"-subj\", f'{dn}',\n \"-newkey\", f'rsa:{rsa_keysize}',\n \"-keyout\", f'{key_filename}',\n \"-out\", f'{csr_filename}',\n \"-nodes\"\n ]\n exec_cmd(cmd)\n return read_keypair(key_filename, csr_filename)",
"def sign_certificate(csr):\n unique_filename = str(uuid.uuid4().hex)\n\n file = open(\"./csr_req/%s.csr\" % unique_filename, \"w\")\n file.write(csr.decode(\"utf-8\"))\n file.close()\n\n subprocess.run([\"../ca/scripts/sign.sh\", unique_filename], check=False)\n\n file = open(\"./csr_req/%s.p7b\" % unique_filename, \"r\")\n cert = file.read()\n\n os.remove(\"./csr_req/%s.csr\" % unique_filename)\n os.remove(\"./csr_req/%s.p7b\" % unique_filename)\n\n return cert",
"def create_pki():\n os.mkdir(pki_dir)\n os.mkdir(f'{pki_dir}/newcerts')\n Path(f'{pki_dir}/index.txt').touch()\n with open(f'{pki_dir}/serial', 'w') as serial_file:\n serial_file.write('00000000')\n serial_file.close()\n create_CA('/CN=My cool CA/O=Honest Achmed/OU=Used Cars/C=EU')",
"def init_csr(privkey, names, cert_dir):\n csr_pem, csr_der = crypto_util.make_csr(privkey.pem, names)\n\n # Save CSR\n le_util.make_or_verify_dir(cert_dir, 0o755)\n csr_f, csr_filename = le_util.unique_file(\n os.path.join(cert_dir, \"csr-letsencrypt.pem\"), 0o644)\n csr_f.write(csr_pem)\n csr_f.close()\n\n logging.info(\"Creating CSR: %s\", csr_filename)\n\n return le_util.CSR(csr_filename, csr_der, \"der\")",
"def generate_csr(key, domain_name):\n csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([\n # Provide various details about who we are.\n x509.NameAttribute(NameOID.COUNTRY_NAME, u\"US\"),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u\"MA\"),\n x509.NameAttribute(NameOID.LOCALITY_NAME, u\"Boston\"),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, u\"Northeastern\"),\n x509.NameAttribute(NameOID.COMMON_NAME, domain_name),\n ])).add_extension(\n x509.SubjectAlternativeName([\n x509.DNSName(domain_name),\n ])\n ,\n critical=True,\n\n # Sign the CSR with our private key.\n ).sign(key, hashes.SHA256(), default_backend())\n\n\n # Write our CSR out to disk.\n with open(domain_name + \".csr\", \"wb\") as f:\n f.write(csr.public_bytes(serialization.Encoding.PEM))\n\n return csr",
"def create_csr_pss(dn):\n tmp_file = f'/tmp/{get_temp_filename()}'\n key_filename = f'{tmp_file}.key'\n csr_filename = f'{tmp_file}.csr'\n\n cmd_genpkey = [\n \"openssl\",\n \"genpkey\",\n \"-algorithm\", \"rsa-pss\",\n \"-pkeyopt\", f'rsa_keygen_bits:{rsa_keysize}',\n \"-pkeyopt\", \"rsa_keygen_pubexp:65537\",\n \"-out\", f'{key_filename}'\n ]\n cmd_req = [\n \"openssl\",\n \"req\",\n \"-new\",\n \"-subj\", f'{dn}',\n \"-key\", f'{key_filename}',\n \"-out\", f'{csr_filename}'\n ]\n for cmd in [cmd_genpkey, cmd_req]:\n exec_cmd(cmd)\n\n return read_keypair(key_filename, csr_filename)",
"def CreatePemFile():\n keyfile = CreateKeyFile()\n csrfile = CreateCsrFile(keyfile)\n crtfile = CreateCrtFile(keyfile, csrfile)\n pemfile = tempfile.mkstemp()[1]\n with open(keyfile) as k:\n with open(crtfile) as c:\n with open(pemfile, 'wb') as p:\n p.write('%s\\n%s' % (k.read(), c.read()))\n return pemfile",
"def opensslCmsCertCreate( ownerCertFile ):\n opensslCmdArgs = [ \"openssl\", \"crl2pkcs7\", \"-certfile\", ownerCertFile,\n \"-nocrl\", \"-outform\", \"der\" ]\n ownerCertCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return ownerCertCmsDerBase64",
"def create_ssl_cert_request ( ssl_hostnames ) :\n first_hostname = ssl_hostnames[ 0 ]\n csr_filename = get_ssl_csr_filename( first_hostname )\n key_filename = get_ssl_key_filename( first_hostname )\n openssl_cnf = \"\"\"\n[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = san_ext\n\n[req_distinguished_name]\ncountryName_default = US\nstateOrProvinceName_default = New York\nlocalityName_default = New York\norganizationalUnitName_default = Home Box Office, Inc\ncommonName_default = \"\"\" + first_hostname + \"\"\"\n\n[san_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @sans\n\n[sans]\n\"\"\"\n counter = 0\n for hostname in ssl_hostnames :\n counter += 1\n openssl_cnf += 'DNS.' + str( counter ) + ' = ' + hostname + '\\n'\n\n with open( first_hostname, 'w' ) as f :\n f.write( openssl_cnf )\n cmd = 'openssl req -new -newkey rsa:2048 -nodes -out ' + csr_filename + ' -keyout ' + key_filename\n cmd += ' -config ' + first_hostname + ' -subj \"/C=US/ST=New York/L=New York/O=Home Box Office Inc/CN=' + first_hostname + '\"'\n keygen = subprocess.call( cmd, shell = True )\n os.remove( first_hostname )\n if keygen != 0 :\n print \"Generation of SSL request failed!\"\n return None\n\n return { 'csr-filename' : csr_filename, 'key-filename' : key_filename }",
"def get_csr_path(self):\n _log.debug(\"get_csr_path: my_node_name={}\".format(self.node_name))\n return os.path.join(self.runtime_dir, \"{}.csr\".format(self.node_name))",
"def _generate_ca_cert(path, pkey):\n crt = _make_base_cert(pkey, 5000, socket.gethostname(),\n random.randrange(0, 2**64))\n crt.set_issuer(crt.get_subject())\n crt.sign(pkey, 'sha256')\n\n data = crypto.dump_certificate(crypto.FILETYPE_PEM, crt)\n open(path, 'wb').write(data)",
"def generate_csr(self, key, cn, san=None):\n csr = x509.CertificateSigningRequestBuilder().subject_name(\n self.generate_x509_name(cn)\n )\n if san:\n dns_names = self.encode_san_dns_names(san)\n csr = csr.add_extension(\n x509.SubjectAlternativeName(dns_names),\n critical=False,\n )\n return csr.sign(key, hashes.SHA256(), default_backend())",
"def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile",
"def sign_csr(csr, ca_private_key, ca_cert=None, issuer_name=None,\n ca_private_key_password=None, generate_ca=False):\n backend = cryptography.hazmat.backends.default_backend()\n # Create x509 artifacts\n root_ca_pkey = serialization.load_pem_private_key(\n ca_private_key.encode(),\n password=ca_private_key_password,\n backend=backend)\n\n new_csr = cryptography.x509.load_pem_x509_csr(\n csr.encode(),\n backend)\n\n if ca_cert:\n root_ca_cert = cryptography.x509.load_pem_x509_certificate(\n ca_cert.encode(),\n backend)\n issuer_name = root_ca_cert.subject\n else:\n issuer_name = issuer_name\n # Create builder\n builder = cryptography.x509.CertificateBuilder()\n builder = builder.serial_number(\n cryptography.x509.random_serial_number())\n builder = builder.issuer_name(issuer_name)\n builder = builder.not_valid_before(\n datetime.datetime.today() - datetime.timedelta(1, 0, 0),\n )\n builder = builder.not_valid_after(\n datetime.datetime.today() + datetime.timedelta(80, 0, 0),\n )\n builder = builder.subject_name(new_csr.subject)\n builder = builder.public_key(new_csr.public_key())\n\n builder = builder.add_extension(\n cryptography.x509.BasicConstraints(ca=generate_ca, path_length=None),\n critical=True\n )\n\n # Sign the csr\n signer_ca_cert = builder.sign(\n private_key=root_ca_pkey,\n algorithm=hashes.SHA256(),\n backend=backend)\n\n return signer_ca_cert.public_bytes(encoding=serialization.Encoding.PEM)",
"def create_cert(commonname, ca_dir):\n sca = SimpleCA(ca_dir)\n sca.new_cert(commonname)",
"def CreateStarCert(filename, log = logging):\n temp1 = tempfile.mkstemp(prefix = 'ssl_proxy')\n temp2 = tempfile.mkstemp(prefix = 'ssl_proxy')\n\n cert_fields = { \"C\": \"US\", \"ST\": \"**INSECURE CONNECTION**\",\n \"L\": \"**INSECURE CONNECTION**\",\n \"O\": \"**INSECURE CONNECTION**\",\n \"OU\": \"**INSECURE CONNECTION**\",\n \"CN\": \"*\" }\n\n cert_valid_days = 1\n\n cert_string = '/C=%(C)s/ST=%(ST)s/L=%(L)s/O=%(O)s/OU=%(OU)s/CN=%(CN)s' % \\\n cert_fields\n\n openssl_command = 'openssl req -newkey rsa:1024 -keyout \"%s\" -nodes ' \\\n '-x509 -days 365 -out \"%s\" -subj \"%s\" -set_serial 0 -days %s ' \\\n '-batch' % (temp1[1], temp2[1], cert_string, cert_valid_days)\n\n find_openssl = os.system('which openssl > /dev/null')\n\n if not find_openssl == 0:\n log.error('Could not find openssl. (Used \"which openssl\" to search)')\n raise OSError, 'Command \"which openssl\" returned: %s' % find_openssl\n\n log.info('Running command: %s' % openssl_command)\n openssl_status = os.system(openssl_command)\n if not openssl_status == 0:\n raise OSError, 'Attempt to run openssl returned: %s' % openssl_status\n\n # Extract the keys into strings.\n key = os.read(temp1[0], 2048)\n cert = os.read(temp2[0], 2048)\n\n os.close(temp1[0])\n os.close(temp2[0])\n\n os.unlink(temp1[1])\n os.unlink(temp2[1])\n\n new_cert = open(filename, 'wb')\n new_cert.write('%s\\n%s' % (key, cert))\n\n new_cert.close()\n\n log.info('Successfully created %s' % filename)\n return True",
"def make_cert_for_spki_request(spki_req_b64, serial, ident):\n spki_obj = netscape_spki_from_b64(spki_req_b64)\n if spki_obj is None:\n raise ValueError('Invalid SPKI object')\n\n root_crt = _try_load_ca_cert(cfg.ca_cert_path())\n root_key = _try_load_ca_private_key(cfg.ca_private_key_path())\n crt = _make_base_cert(spki_obj.get_pubkey(), 365, ident, serial)\n crt.set_issuer(root_crt.get_subject())\n crt.sign(root_key, 'sha256')\n return crypto.dump_certificate(crypto.FILETYPE_ASN1, crt)",
"def create_cert(self, cert_file, key_file):\n if os.path.isfile(cert_file) and os.path.isfile(key_file):\n return cert_file, key_file\n\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 2048)\n cert = crypto.X509()\n cert.get_subject().C = \"US\"\n cert.get_subject().ST = \"CO\"\n cert.get_subject().L = \"Denver\"\n cert.get_subject().CN = gethostname()\n cert.get_subject().O = \"Metropolitan State University of Denver\"\n cert.get_subject().OU = \"Computer Science\"\n cert.set_serial_number(6)\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(365*24*60*60)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha1')\n\n open(join(cert_file), 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(join(key_file), \"w\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n return cert_file, key_file",
"def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")",
"def _generate_csr_and_key():\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u\"Magnum User\"),\n ])).sign(key, hashes.SHA256(), default_backend())\n\n result = {\n 'csr': csr.public_bytes(\n encoding=serialization.Encoding.PEM).decode(\"utf-8\"),\n 'key': key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()).decode(\"utf-8\"),\n }\n\n return result",
"def _retrieve_crt_path(haproxy_base_dir, listener, primary_cn):\n confs_dir = os.path.abspath(os.path.normpath(haproxy_base_dir))\n confs_path = os.path.join(confs_dir, listener.id)\n if haproxy_base_dir and listener.id:\n if not os.path.isdir(confs_path):\n os.makedirs(confs_path, 0o755)\n return os.path.join(\n confs_path, '{0}.pem'.format(primary_cn))",
"def create_server_certs_enc():\n global server_keystore, config\n\n same_enc_sign_cert = config[\"config\"][\"same_enc_sign_cert\"]\n if same_enc_sign_cert:\n dn = \"/CN=server certificate RSA\"\n else:\n dn = \"/CN=server certificate encryption RSA\"\n key_pair_rsa = create_csr(dn)\n server_keystore[\"key\"] = key_pair_rsa[\"key\"]\n san = [f'URI.1 = {uuid.uuid4().urn}']\n server_keystore[\"crt\"] = sign_csr(key_pair_rsa[\"pub\"], dn, san)",
"def create_CA(dn):\n cmd_genrsa = [\"openssl\",\n \"genrsa\",\n \"-aes256\",\n \"-out\", f'{pki_dir}/ca.key',\n \"-passout\", f'pass:{ca_password}',\n f'{rsa_keysize}']\n cmd_req = [\"openssl\",\n \"req\",\n \"-new\",\n \"-x509\",\n \"-days\", \"999999\",\n \"-sha256\",\n \"-key\", f'{pki_dir}/ca.key',\n \"-out\", server_key_files[\"ca\"],\n \"-subj\", f'{dn}',\n \"-passin\", f'pass:{ca_password}']\n cmds = [cmd_genrsa, cmd_req]\n for cmd in cmds:\n exec_cmd(cmd)",
"def sign_certificate_request(csr, rootkey, rootcrt, client_key, domain_name, notBefore, notAfter):\n\n serial_number = int(str(uuid.uuid4().int)[:20])\n crt = x509.CertificateBuilder().subject_name(\n csr.subject\n ).issuer_name(\n rootcrt.subject\n ).public_key(\n csr.public_key()\n ).serial_number(\n serial_number # pylint: disable=no-member\n ).not_valid_before(\n notBefore\n ).not_valid_after(\n notAfter\n ).add_extension(\n extension=x509.KeyUsage(\n digital_signature=True, key_encipherment=True, content_commitment=True,\n data_encipherment=False, key_agreement=False, encipher_only=False, decipher_only=False, key_cert_sign=False, crl_sign=False\n ),\n critical=True\n ).add_extension(\n extension=x509.BasicConstraints(ca=False, path_length=None),\n critical=True\n ).add_extension(\n extension=x509.AuthorityKeyIdentifier.from_issuer_public_key(rootkey.public_key()),\n critical=False\n ).add_extension(\n csr.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_ALTERNATIVE_NAME).value,\n critical=False,\n ).sign(\n private_key=rootkey,\n algorithm=hashes.SHA256(),\n backend=default_backend()\n )\n\n ##storing client's .crt\n with open(domain_name + \".crt\", 'wb') as f:\n f.write(crt.public_bytes(encoding=serialization.Encoding.PEM))",
"def generate_ssl_object(module, ssl_cafile, ssl_certfile, ssl_keyfile,\n ssl_crlfile=None):\n\n ssl_files = {\n 'cafile': {'path': ssl_cafile, 'is_temp': False},\n 'certfile': {'path': ssl_certfile, 'is_temp': False},\n 'keyfile': {'path': ssl_keyfile, 'is_temp': False},\n 'crlfile': {'path': ssl_crlfile, 'is_temp': False}\n }\n\n for key, value in ssl_files.items():\n if value['path'] is not None:\n # TODO is that condition sufficient?\n if value['path'].startswith(\"-----BEGIN\"):\n # value is a content, need to create a tempfile\n fd, path = tempfile.mkstemp(prefix=key)\n with os.fdopen(fd, 'w') as tmp:\n tmp.write(value['path'])\n ssl_files[key]['path'] = path\n ssl_files[key]['is_temp'] = True\n elif not os.path.exists(os.path.dirname(value['path'])):\n # value is not a content, but path does not exist,\n # fails the module\n module.fail_json(\n msg='\\'%s\\' is not a content and provided path does not '\n 'exist, please check your SSL configuration.' % key\n )\n\n return ssl_files",
"def create_token(filename):\n\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n\n sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)\n vk = sk.verifying_key\n if vk is not None:\n line = encode_line(\"signing-key\", sk.to_der(), vk.to_der())\n\n with open(filename, \"w\") as f:\n f.write(line)",
"def req_handler(args):\n key = _get_key(args)\n subject = get_subject_arguments()\n req = create_certificate_request(key, subject=subject, file_name=args.req_out)\n if not args.req_out:\n print(print_certificate_request(req))\n return req",
"def prepare_certificate_file(certificate: str) -> str:\n certificate_file = NamedTemporaryFile(delete=False)\n certificate_path = certificate_file.name\n certificate_file.write(bytes(certificate, 'utf-8'))\n certificate_file.close()\n demisto.debug('Successfully preparing the certificate')\n return certificate_path",
"def generateNewCSR(self, fqdn, subject=None, san=None, with_new_key=False, KeyUsage=True, ExtendedKeyUsage=True):\n if with_new_key:\n self.generateNewKey()\n\n self.logger.info(\"Creating CSR for '\" + str(fqdn) + \"' with SubjectAlternativeName's: \" + str(san))\n\n csr_subject = []\n if fqdn:\n csr_subject.append(x509.NameAttribute(x509.OID_COMMON_NAME, str(fqdn)))\n if subject is not None:\n if subject.organization is not None:\n csr_subject.append(x509.NameAttribute(x509.OID_ORGANIZATION_NAME, str(subject.organization)))\n if subject.organizational_unit is not None:\n csr_subject.append(x509.NameAttribute(x509.OID_ORGANIZATIONAL_UNIT_NAME, str(subject.organizational_unit)))\n if subject.country is not None:\n csr_subject.append(x509.NameAttribute(x509.OID_COUNTRY_NAME, str(subject.country.upper())))\n if subject.state is not None:\n csr_subject.append(x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, str(subject.state) ))\n if subject.locality is not None:\n csr_subject.append(x509.NameAttribute(x509.OID_LOCALITY_NAME, str(subject.locality)))\n if subject.email is not None:\n csr_subject.append(x509.NameAttribute(x509.OID_EMAIL_ADDRESS, str(subject.email)))\n\n # Generate a CSR\n csr = x509.CertificateSigningRequestBuilder()\n csr = csr.subject_name(x509.Name(csr_subject))\n csr = csr.add_extension(\n x509.BasicConstraints(ca=False, path_length=None), critical=True,\n )\n # Adding SubjectAlternativeName\n adding_san = []\n if san is not None:\n for s in san:\n adding_san.append(x509.DNSName(s))\n csr = csr.add_extension(\n x509.SubjectAlternativeName(adding_san),\n critical=False,\n )\n\n # Key Usage: digitalSignature, keyEncipherment (critical)\n if KeyUsage:\n csr = csr.add_extension(x509.KeyUsage(True, False, True, False, False, False, False, False, False),\n critical=True)\n # Extended Key Usage: TLS Web Server Authentication, TLS Web Client Authentication\n if ExtendedKeyUsage:\n csr = csr.add_extension(\n x509.ExtendedKeyUsage([x509.ExtendedKeyUsageOID.SERVER_AUTH,x509.ExtendedKeyUsageOID.CLIENT_AUTH]),\n critical=False,\n )\n\n # Sign the CSR with our private key.\n self.csr = csr.sign(self.key, hashes.SHA256(), default_backend())"
] | [
"0.76157844",
"0.7091698",
"0.682195",
"0.66409546",
"0.64632416",
"0.64031243",
"0.6295008",
"0.6232049",
"0.6201246",
"0.6178768",
"0.61259174",
"0.61010826",
"0.6063991",
"0.5976694",
"0.5963608",
"0.5935446",
"0.58889717",
"0.58795524",
"0.58761525",
"0.5852114",
"0.57947606",
"0.57098687",
"0.5690949",
"0.56377316",
"0.56196856",
"0.5612721",
"0.56107914",
"0.56101406",
"0.56059504",
"0.5588666"
] | 0.7676574 | 0 |
Creates an SSL CRT file and returns the path. | def CreateCrtFile(keyfile, csrfile):
  crtfile = tempfile.mkstemp()[1]
  cmd = [
      'openssl',
      'x509',
      '-req',
      '-days', '1',
      '-in', csrfile,
      '-signkey', keyfile,
      '-out', crtfile
  ]
  _RunCommand(cmd)
  return crtfile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _retrieve_crt_path(haproxy_base_dir, listener, primary_cn):\n confs_dir = os.path.abspath(os.path.normpath(haproxy_base_dir))\n confs_path = os.path.join(confs_dir, listener.id)\n if haproxy_base_dir and listener.id:\n if not os.path.isdir(confs_path):\n os.makedirs(confs_path, 0o755)\n return os.path.join(\n confs_path, '{0}.pem'.format(primary_cn))",
"def opensslCmsCertCreate( ownerCertFile ):\n opensslCmdArgs = [ \"openssl\", \"crl2pkcs7\", \"-certfile\", ownerCertFile,\n \"-nocrl\", \"-outform\", \"der\" ]\n ownerCertCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return ownerCertCmsDerBase64",
"def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")",
"def CreatePemFile():\n keyfile = CreateKeyFile()\n csrfile = CreateCsrFile(keyfile)\n crtfile = CreateCrtFile(keyfile, csrfile)\n pemfile = tempfile.mkstemp()[1]\n with open(keyfile) as k:\n with open(crtfile) as c:\n with open(pemfile, 'wb') as p:\n p.write('%s\\n%s' % (k.read(), c.read()))\n return pemfile",
"def CreateCsrFile(keyfile):\n csrfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'req',\n '-new',\n '-key', keyfile,\n '-out', csrfile,\n '-subj', '/C=NA/ST=NA/L=NA/O=Chromium/OU=Test/CN=chromium.org'\n ]\n _RunCommand(cmd)\n return csrfile",
"def create_pki():\n os.mkdir(pki_dir)\n os.mkdir(f'{pki_dir}/newcerts')\n Path(f'{pki_dir}/index.txt').touch()\n with open(f'{pki_dir}/serial', 'w') as serial_file:\n serial_file.write('00000000')\n serial_file.close()\n create_CA('/CN=My cool CA/O=Honest Achmed/OU=Used Cars/C=EU')",
"def generate_ssl_object(module, ssl_cafile, ssl_certfile, ssl_keyfile,\n ssl_crlfile=None):\n\n ssl_files = {\n 'cafile': {'path': ssl_cafile, 'is_temp': False},\n 'certfile': {'path': ssl_certfile, 'is_temp': False},\n 'keyfile': {'path': ssl_keyfile, 'is_temp': False},\n 'crlfile': {'path': ssl_crlfile, 'is_temp': False}\n }\n\n for key, value in ssl_files.items():\n if value['path'] is not None:\n # TODO is that condition sufficient?\n if value['path'].startswith(\"-----BEGIN\"):\n # value is a content, need to create a tempfile\n fd, path = tempfile.mkstemp(prefix=key)\n with os.fdopen(fd, 'w') as tmp:\n tmp.write(value['path'])\n ssl_files[key]['path'] = path\n ssl_files[key]['is_temp'] = True\n elif not os.path.exists(os.path.dirname(value['path'])):\n # value is not a content, but path does not exist,\n # fails the module\n module.fail_json(\n msg='\\'%s\\' is not a content and provided path does not '\n 'exist, please check your SSL configuration.' % key\n )\n\n return ssl_files",
"def get_ssl_certificate():",
"def CreateStarCert(filename, log = logging):\n temp1 = tempfile.mkstemp(prefix = 'ssl_proxy')\n temp2 = tempfile.mkstemp(prefix = 'ssl_proxy')\n\n cert_fields = { \"C\": \"US\", \"ST\": \"**INSECURE CONNECTION**\",\n \"L\": \"**INSECURE CONNECTION**\",\n \"O\": \"**INSECURE CONNECTION**\",\n \"OU\": \"**INSECURE CONNECTION**\",\n \"CN\": \"*\" }\n\n cert_valid_days = 1\n\n cert_string = '/C=%(C)s/ST=%(ST)s/L=%(L)s/O=%(O)s/OU=%(OU)s/CN=%(CN)s' % \\\n cert_fields\n\n openssl_command = 'openssl req -newkey rsa:1024 -keyout \"%s\" -nodes ' \\\n '-x509 -days 365 -out \"%s\" -subj \"%s\" -set_serial 0 -days %s ' \\\n '-batch' % (temp1[1], temp2[1], cert_string, cert_valid_days)\n\n find_openssl = os.system('which openssl > /dev/null')\n\n if not find_openssl == 0:\n log.error('Could not find openssl. (Used \"which openssl\" to search)')\n raise OSError, 'Command \"which openssl\" returned: %s' % find_openssl\n\n log.info('Running command: %s' % openssl_command)\n openssl_status = os.system(openssl_command)\n if not openssl_status == 0:\n raise OSError, 'Attempt to run openssl returned: %s' % openssl_status\n\n # Extract the keys into strings.\n key = os.read(temp1[0], 2048)\n cert = os.read(temp2[0], 2048)\n\n os.close(temp1[0])\n os.close(temp2[0])\n\n os.unlink(temp1[1])\n os.unlink(temp2[1])\n\n new_cert = open(filename, 'wb')\n new_cert.write('%s\\n%s' % (key, cert))\n\n new_cert.close()\n\n log.info('Successfully created %s' % filename)\n return True",
"def _generate_ca_cert(path, pkey):\n crt = _make_base_cert(pkey, 5000, socket.gethostname(),\n random.randrange(0, 2**64))\n crt.set_issuer(crt.get_subject())\n crt.sign(pkey, 'sha256')\n\n data = crypto.dump_certificate(crypto.FILETYPE_PEM, crt)\n open(path, 'wb').write(data)",
"def create_cert(commonname, ca_dir):\n sca = SimpleCA(ca_dir)\n sca.new_cert(commonname)",
"def get_ssl_certificate() :",
"def GetCurrentCertsFile():\n return _ca_certs_file",
"def _create_srt_file(content=None):\r\n content = content or SRT_content\r\n srt_file = tempfile.NamedTemporaryFile(suffix=\".srt\")\r\n srt_file.content_type = 'application/x-subrip; charset=utf-8'\r\n srt_file.write(content)\r\n srt_file.seek(0)\r\n return srt_file",
"def create_cert(self, cert_file, key_file):\n if os.path.isfile(cert_file) and os.path.isfile(key_file):\n return cert_file, key_file\n\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 2048)\n cert = crypto.X509()\n cert.get_subject().C = \"US\"\n cert.get_subject().ST = \"CO\"\n cert.get_subject().L = \"Denver\"\n cert.get_subject().CN = gethostname()\n cert.get_subject().O = \"Metropolitan State University of Denver\"\n cert.get_subject().OU = \"Computer Science\"\n cert.set_serial_number(6)\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(365*24*60*60)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha1')\n\n open(join(cert_file), 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(join(key_file), \"w\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n return cert_file, key_file",
"def create_ssl_cert_request ( ssl_hostnames ) :\n first_hostname = ssl_hostnames[ 0 ]\n csr_filename = get_ssl_csr_filename( first_hostname )\n key_filename = get_ssl_key_filename( first_hostname )\n openssl_cnf = \"\"\"\n[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = san_ext\n\n[req_distinguished_name]\ncountryName_default = US\nstateOrProvinceName_default = New York\nlocalityName_default = New York\norganizationalUnitName_default = Home Box Office, Inc\ncommonName_default = \"\"\" + first_hostname + \"\"\"\n\n[san_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @sans\n\n[sans]\n\"\"\"\n counter = 0\n for hostname in ssl_hostnames :\n counter += 1\n openssl_cnf += 'DNS.' + str( counter ) + ' = ' + hostname + '\\n'\n\n with open( first_hostname, 'w' ) as f :\n f.write( openssl_cnf )\n cmd = 'openssl req -new -newkey rsa:2048 -nodes -out ' + csr_filename + ' -keyout ' + key_filename\n cmd += ' -config ' + first_hostname + ' -subj \"/C=US/ST=New York/L=New York/O=Home Box Office Inc/CN=' + first_hostname + '\"'\n keygen = subprocess.call( cmd, shell = True )\n os.remove( first_hostname )\n if keygen != 0 :\n print \"Generation of SSL request failed!\"\n return None\n\n return { 'csr-filename' : csr_filename, 'key-filename' : key_filename }",
"def generate_certificates():\n print('GEN CERTS')\n domain = os.environ.get('SSL_DOMAIN_NAME', 'localhost')\n email = os.environ.get('SSL_EMAIL', '[email protected]')\n print(domain)\n cert_path = '/etc/letsencrypt/live/' + domain\n if domain == \"localhost\":\n print('GEN LOCALHOST SSL KEY')\n call(['mkdir', '-p', cert_path])\n cmd = [\n 'openssl',\n 'req',\n '-x509',\n '-newkey',\n 'rsa:4096',\n '-keyout',\n cert_path +\n '/privkey.pem',\n '-out',\n cert_path +\n '/cert.pem',\n '-days',\n '365',\n '-nodes',\n '-subj',\n '/CN=localhost']\n call(cmd)\n\n else:\n # files exist so renew\n if os.path.isfile(cert_path + '/cert.pem') and os.path.isfile(cert_path + \\\n '/fullchain.pem') and os.path.isfile(cert_path + '/privkey.pem'):\n print('RENEW CERTS')\n cmd = ['certbot', 'renew']\n print(cmd)\n call(cmd)\n\n else:\n print('GENERATE CERTS')\n cmd = [\n 'certbot',\n 'certonly',\n '-a',\n 'standalone',\n '--agree-tos',\n '-d',\n domain,\n '-m',\n email,\n ' --noninteractive']\n print(cmd)\n call(cmd)\n\n # use mosquitto conf template to rewrite mosquitto conf file including env\n # SSL_CERTIFICATES_FOLDER\n marker_replace_template(\n \"/etc/mosquitto/mosquitto-ssl-template.conf\",\n \"/etc/mosquitto/mosquitto-ssl.conf\",\n 'SSL_CERTIFICATE_FOLDER',\n cert_path)",
"def get_cert_file(self, bypass_time_validity_check=False):\n file_contents = (\n \"{} {} {}\"\n ).format(self.cert_key_type,\n str(base64.b64encode(self._sign_cert(bypass_time_validity_check)), encoding='ascii'),\n self.public_key_comment)\n return file_contents",
"def create_server_certs():\n global server_key_files, server_keystore, config\n\n same_enc_sign_cert = config[\"config\"][\"same_enc_sign_cert\"]\n if not Path(server_key_files[\"key\"]).is_file() or not Path(server_key_files[\"crt\"]).is_file():\n print(\"create new encryption cert\\n\")\n create_server_certs_enc()\n for f_item in [\"key\", \"crt\"]:\n with open(server_key_files[f_item], \"w\") as f:\n f.write(server_keystore[f_item])\n f.close()\n else:\n for f_item in [\"key\", \"crt\"]:\n with open(server_key_files[f_item], \"r\") as f:\n server_keystore[f_item] = f.read()\n f.close()\n\n server_keystore[\"key-sign\"] = server_keystore[\"key\"]\n server_keystore[\"crt-sign\"] = server_keystore[\"crt\"]\n\n if not Path(server_key_files[\"key-sign\"]).is_file() or not Path(server_key_files[\"crt-sign\"]).is_file():\n print(\"create new signing cert\\n\")\n if not same_enc_sign_cert:\n create_server_certs_sign()\n for f_item in [\"key-sign\", \"crt-sign\"]:\n with open(server_key_files[f_item], \"w\") as f:\n f.write(server_keystore[f_item])\n f.close()\n else:\n for f_item in [\"key-sign\", \"crt-sign\"]:\n with open(server_key_files[f_item], \"r\") as f:\n server_keystore[f_item] = f.read()\n f.close()",
"def _get_cert_path(self, cert_name, serial):\n return '%s%s/%d_%s.crt' % (self.ca_dir, CERT_DIR_NAME, serial,\n cert_name)",
"def opensslCmsDataCreate( conveyedInfoFile ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-data_create\", \"-in\", conveyedInfoFile,\n \"-outform\", \"der\" ]\n conveyedInfoCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsDerBase64",
"def put_certificate(self, target, who, args, _files, _user_path):\n name = self.arg_get(args, 'name', str)\n if not commonl.verify_str_safe(name, do_raise = False):\n raise ValueError(\n f\"{name}: invalid certificate name, only [-_a-zA-Z0-9] allowed\")\n\n with target.target_owned_and_locked(who):\n target.timestamp()\n\n cert_path = os.path.join(target.state_dir, \"certificates\")\n cert_client_path = os.path.join(target.state_dir, \"certificates_client\")\n self._setup_maybe(target, cert_path, cert_client_path)\n\n client_key_path = os.path.join(cert_client_path, name + \".key\")\n client_req_path = os.path.join(cert_client_path, name + \".req\")\n client_cert_path = os.path.join(cert_client_path, name + \".cert\")\n\n if os.path.isfile(client_key_path) \\\n and os.path.isfile(client_cert_path):\t# already made?\n with open(client_key_path) as keyf, \\\n open(client_cert_path) as certf:\n return dict({\n \"name\": name,\n \"created\": False,\n \"key\": keyf.read(),\n \"cert\": certf.read(),\n })\n\n try:\n subprocess.run(\n f\"openssl genrsa -out {client_key_path} {self.key_size}\".split(),\n stdin = None, timeout = 5,\n capture_output = True, cwd = cert_path, check = True)\n allocid = target.fsdb.get(\"_alloc.id\", \"UNKNOWN\")\n subprocess.run(\n f\"openssl req -new -key {client_key_path} -out {client_req_path}\"\n f\" -subj /C=LC/ST=Local/L=Local/O=TCF-Signing-Authority-{target.id}-{allocid}/CN=TCF-{name}\".split(),\n check = True, cwd = cert_path,\n stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n target.log.debug(f\"{name}: created client's certificate\")\n\n # Issue the client certificate using the cert request and the CA cert/key.\n # note we run in the cert_path directory, so the ca.*\n # files are there\n subprocess.run(\n f\"openssl x509 -req -in {client_req_path} -CA ca.cert\"\n \" -CAkey ca.key -set_serial 101 -extensions client\"\n f\" -days 365 -outform PEM -out {client_cert_path}\".split(),\n stdin = None, timeout = 5,\n capture_output = True, cwd = cert_path, check = True)\n except subprocess.CalledProcessError as e:\n target.log.error(f\"command {' '.join(e.cmd)} failed: {e.output}\")\n self._client_wipe(name, cert_client_path)\t# don't leave things half there\n raise\n\n with open(client_key_path) as keyf, \\\n open(client_cert_path) as certf:\n return dict({\n \"name\": name,\n \"created\": True,\n \"key\": keyf.read(),\n \"cert\": certf.read(),\n })",
"def create_temporary_ca_file(anchor_list):\n try:\n f, fname = tempfile.mkstemp()\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n l = os.write(f, s)\n os.close(f)\n except:\n return None\n return fname",
"def create_CA(dn):\n cmd_genrsa = [\"openssl\",\n \"genrsa\",\n \"-aes256\",\n \"-out\", f'{pki_dir}/ca.key',\n \"-passout\", f'pass:{ca_password}',\n f'{rsa_keysize}']\n cmd_req = [\"openssl\",\n \"req\",\n \"-new\",\n \"-x509\",\n \"-days\", \"999999\",\n \"-sha256\",\n \"-key\", f'{pki_dir}/ca.key',\n \"-out\", server_key_files[\"ca\"],\n \"-subj\", f'{dn}',\n \"-passin\", f'pass:{ca_password}']\n cmds = [cmd_genrsa, cmd_req]\n for cmd in cmds:\n exec_cmd(cmd)",
"def get_tls_factory(self):\n if not access(self.cert_path, R_OK):\n raise RuntimeError('Error: cert file at %s is not '\n 'readable' % self.cert_path)\n if not access(self.key_path, R_OK):\n raise RuntimeError('Error: key file at %s is not '\n 'readable' % self.key_path)\n if not HAVE_PYOPENSSL:\n raise RuntimeError('Error: running with TLS (cert and key) requires'\n ' pyOpenSSL, but it does not appear to be '\n 'installed. Please \"pip install pyOpenSSL\".')\n # check certs are readable\n cf = certificateOptionsFromFiles(self.key_path, self.cert_path)\n return cf",
"def _create_protocol_file(\n path_to_protocol_package: str, file_name: str, file_content: str\n) -> None:\n pathname = os.path.join(path_to_protocol_package, file_name)\n\n with open_file(pathname, \"w\") as file:\n file.write(file_content)",
"def push_ssl_crt():\n logger.info(u\"Pushing SSl Certificates\")\n key = '%(config_folder)s/%(ssl_key)s' % env\n crt = '%(config_folder)s/%(ssl_crt)s' % env\n bundle = '%(config_folder)s/rapidssl_ca_bundle.pem' % env\n logger.info(u\"Using SSL keys and certs at %s and %s\" % (key, crt))\n\n # Putting to /tmp and moving for permission purposes\n put(key, '/tmp/_.policystat.com.key')\n sudo('mv /tmp/_.policystat.com.key /etc/ssl/private/_.policystat.com.key')\n sudo('chmod 640 /etc/ssl/private/_.policystat.com.key')\n sudo('chown root:ssl-cert /etc/ssl/private/_.policystat.com.key')\n\n put(crt, '/tmp/_.policystat.com.crt')\n put(bundle, '/tmp/rapidssl_ca_bundle.pem')\n # Combine the crt with the rapidssl intermediate bundle\n sudo('cat /tmp/_.policystat.com.crt /tmp/rapidssl_ca_bundle.pem > \\\n /tmp/_.policystat.com.crt.bundled')\n sudo(\n 'mv /tmp/_.policystat.com.crt.bundled '\n '/etc/ssl/certs/_.policystat.com.crt'\n )\n sudo('chmod 777 /etc/ssl/certs/_.policystat.com.crt')",
"def prepare_certificate_file(certificate: str) -> str:\n certificate_file = NamedTemporaryFile(delete=False)\n certificate_path = certificate_file.name\n certificate_file.write(bytes(certificate, 'utf-8'))\n certificate_file.close()\n demisto.debug('Successfully preparing the certificate')\n return certificate_path",
"def _create_certificate_chain():\n caext = X509Extension(b\"basicConstraints\", False, b\"CA:true\")\n not_after_date = datetime.date.today() + datetime.timedelta(days=365)\n not_after = not_after_date.strftime(\"%Y%m%d%H%M%SZ\").encode(\"ascii\")\n\n # Step 1\n cakey = PKey()\n cakey.generate_key(TYPE_RSA, 2048)\n cacert = X509()\n cacert.set_version(2)\n cacert.get_subject().commonName = \"Authority Certificate\"\n cacert.set_issuer(cacert.get_subject())\n cacert.set_pubkey(cakey)\n cacert.set_notBefore(b\"20000101000000Z\")\n cacert.set_notAfter(not_after)\n cacert.add_extensions([caext])\n cacert.set_serial_number(0)\n cacert.sign(cakey, \"sha256\")\n\n # Step 2\n ikey = PKey()\n ikey.generate_key(TYPE_RSA, 2048)\n icert = X509()\n icert.set_version(2)\n icert.get_subject().commonName = \"Intermediate Certificate\"\n icert.set_issuer(cacert.get_subject())\n icert.set_pubkey(ikey)\n icert.set_notBefore(b\"20000101000000Z\")\n icert.set_notAfter(not_after)\n icert.add_extensions([caext])\n icert.set_serial_number(0)\n icert.sign(cakey, \"sha256\")\n\n # Step 3\n skey = PKey()\n skey.generate_key(TYPE_RSA, 2048)\n scert = X509()\n scert.set_version(2)\n scert.get_subject().commonName = \"Server Certificate\"\n scert.set_issuer(icert.get_subject())\n scert.set_pubkey(skey)\n scert.set_notBefore(b\"20000101000000Z\")\n scert.set_notAfter(not_after)\n scert.add_extensions(\n [X509Extension(b\"basicConstraints\", True, b\"CA:false\")]\n )\n scert.set_serial_number(0)\n scert.sign(ikey, \"sha256\")\n\n return [(cakey, cacert), (ikey, icert), (skey, scert)]",
"def create_ca_file(anchor_list, filename):\n try:\n f = open(filename, \"w\")\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n f.write(s)\n f.close()\n except:\n return None\n return filename"
] | [
"0.6699168",
"0.6504106",
"0.6388449",
"0.6253104",
"0.62328863",
"0.6191254",
"0.6086789",
"0.60492927",
"0.6046661",
"0.5963553",
"0.5953179",
"0.59513456",
"0.57639164",
"0.5744498",
"0.5647329",
"0.56415385",
"0.56214416",
"0.561746",
"0.55993444",
"0.55791825",
"0.5538785",
"0.55249554",
"0.54756135",
"0.5467627",
"0.54489195",
"0.54442304",
"0.54259604",
"0.54080105",
"0.539486",
"0.5368237"
] | 0.71155643 | 0 |
Creates an SSL PEM file and returns the path. | def CreatePemFile():
  keyfile = CreateKeyFile()
  csrfile = CreateCsrFile(keyfile)
  crtfile = CreateCrtFile(keyfile, csrfile)
  pemfile = tempfile.mkstemp()[1]
  with open(keyfile) as k:
    with open(crtfile) as c:
      with open(pemfile, 'wb') as p:
        p.write('%s\n%s' % (k.read(), c.read()))
  return pemfile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")",
"def opensslCmsCertCreate( ownerCertFile ):\n opensslCmdArgs = [ \"openssl\", \"crl2pkcs7\", \"-certfile\", ownerCertFile,\n \"-nocrl\", \"-outform\", \"der\" ]\n ownerCertCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return ownerCertCmsDerBase64",
"def tls_certificate_chain_pem_path(tls_certificate):\n with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:\n yield cert_pem",
"def tls_ca_certificate_pem_path(ca):\n with ca.cert_pem.tempfile() as ca_cert_pem:\n yield ca_cert_pem",
"def _generate_ca_cert(path, pkey):\n crt = _make_base_cert(pkey, 5000, socket.gethostname(),\n random.randrange(0, 2**64))\n crt.set_issuer(crt.get_subject())\n crt.sign(pkey, 'sha256')\n\n data = crypto.dump_certificate(crypto.FILETYPE_PEM, crt)\n open(path, 'wb').write(data)",
"def _generate_ca_private_key(path):\n DEFAULT_KEY_ALG = crypto.TYPE_RSA\n DEFAULT_KEY_BITS = 2048\n\n pkey = crypto.PKey()\n pkey.generate_key(DEFAULT_KEY_ALG, DEFAULT_KEY_BITS)\n data = crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)\n open(path, 'wb').write(data)\n\n return pkey",
"def convert_key_to_pem ( key_filename, output_filename ) :\n cmd = 'openssl rsa -in ' + key_filename + ' -outform PEM -out ' + output_filename\n return subprocess.call( cmd, shell = True )",
"def _retrieve_crt_path(haproxy_base_dir, listener, primary_cn):\n confs_dir = os.path.abspath(os.path.normpath(haproxy_base_dir))\n confs_path = os.path.join(confs_dir, listener.id)\n if haproxy_base_dir and listener.id:\n if not os.path.isdir(confs_path):\n os.makedirs(confs_path, 0o755)\n return os.path.join(\n confs_path, '{0}.pem'.format(primary_cn))",
"def PRIVATE_RSA_KEYFILE_PATH() :\n return os.path.join( config.CONFIG_PATH(), \"%s-private.pem\" % RSA_KEYPAIR_PREFIX() )",
"def generate_ssl_object(module, ssl_cafile, ssl_certfile, ssl_keyfile,\n ssl_crlfile=None):\n\n ssl_files = {\n 'cafile': {'path': ssl_cafile, 'is_temp': False},\n 'certfile': {'path': ssl_certfile, 'is_temp': False},\n 'keyfile': {'path': ssl_keyfile, 'is_temp': False},\n 'crlfile': {'path': ssl_crlfile, 'is_temp': False}\n }\n\n for key, value in ssl_files.items():\n if value['path'] is not None:\n # TODO is that condition sufficient?\n if value['path'].startswith(\"-----BEGIN\"):\n # value is a content, need to create a tempfile\n fd, path = tempfile.mkstemp(prefix=key)\n with os.fdopen(fd, 'w') as tmp:\n tmp.write(value['path'])\n ssl_files[key]['path'] = path\n ssl_files[key]['is_temp'] = True\n elif not os.path.exists(os.path.dirname(value['path'])):\n # value is not a content, but path does not exist,\n # fails the module\n module.fail_json(\n msg='\\'%s\\' is not a content and provided path does not '\n 'exist, please check your SSL configuration.' % key\n )\n\n return ssl_files",
"def tls_certificate_private_key_pem_path(tls_certificate):\n with tls_certificate.private_key_pem.tempfile() as cert_key_pem:\n yield cert_key_pem",
"def _build_pem(tls_cert):\n pem = ()\n if tls_cert.intermediates:\n for c in tls_cert.intermediates:\n pem = pem + (c,)\n if tls_cert.certificate:\n pem = pem + (tls_cert.certificate,)\n if tls_cert.private_key:\n pem = pem + (tls_cert.private_key,)\n return \"\\n\".join(pem)",
"def save_certificate(self, certificate_msg, cert_path, chain_path):\n # pylint: disable=no-self-use\n cert_chain_abspath = None\n cert_fd, cert_file = le_util.unique_file(cert_path, 0o644)\n cert_fd.write(certificate_msg.certificate.as_pem())\n cert_fd.close()\n logging.info(\n \"Server issued certificate; certificate written to %s\", cert_file)\n\n if certificate_msg.chain:\n chain_fd, chain_fn = le_util.unique_file(chain_path, 0o644)\n for cert in certificate_msg.chain:\n chain_fd.write(cert.to_pem())\n chain_fd.close()\n\n logging.info(\"Cert chain written to %s\", chain_fn)\n\n # This expects a valid chain file\n cert_chain_abspath = os.path.abspath(chain_fn)\n\n return os.path.abspath(cert_file), cert_chain_abspath",
"def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile",
"def get_cert_file(self, bypass_time_validity_check=False):\n file_contents = (\n \"{} {} {}\"\n ).format(self.cert_key_type,\n str(base64.b64encode(self._sign_cert(bypass_time_validity_check)), encoding='ascii'),\n self.public_key_comment)\n return file_contents",
"def get_ssl_certificate():",
"def create_pki():\n os.mkdir(pki_dir)\n os.mkdir(f'{pki_dir}/newcerts')\n Path(f'{pki_dir}/index.txt').touch()\n with open(f'{pki_dir}/serial', 'w') as serial_file:\n serial_file.write('00000000')\n serial_file.close()\n create_CA('/CN=My cool CA/O=Honest Achmed/OU=Used Cars/C=EU')",
"def CreateStarCert(filename, log = logging):\n temp1 = tempfile.mkstemp(prefix = 'ssl_proxy')\n temp2 = tempfile.mkstemp(prefix = 'ssl_proxy')\n\n cert_fields = { \"C\": \"US\", \"ST\": \"**INSECURE CONNECTION**\",\n \"L\": \"**INSECURE CONNECTION**\",\n \"O\": \"**INSECURE CONNECTION**\",\n \"OU\": \"**INSECURE CONNECTION**\",\n \"CN\": \"*\" }\n\n cert_valid_days = 1\n\n cert_string = '/C=%(C)s/ST=%(ST)s/L=%(L)s/O=%(O)s/OU=%(OU)s/CN=%(CN)s' % \\\n cert_fields\n\n openssl_command = 'openssl req -newkey rsa:1024 -keyout \"%s\" -nodes ' \\\n '-x509 -days 365 -out \"%s\" -subj \"%s\" -set_serial 0 -days %s ' \\\n '-batch' % (temp1[1], temp2[1], cert_string, cert_valid_days)\n\n find_openssl = os.system('which openssl > /dev/null')\n\n if not find_openssl == 0:\n log.error('Could not find openssl. (Used \"which openssl\" to search)')\n raise OSError, 'Command \"which openssl\" returned: %s' % find_openssl\n\n log.info('Running command: %s' % openssl_command)\n openssl_status = os.system(openssl_command)\n if not openssl_status == 0:\n raise OSError, 'Attempt to run openssl returned: %s' % openssl_status\n\n # Extract the keys into strings.\n key = os.read(temp1[0], 2048)\n cert = os.read(temp2[0], 2048)\n\n os.close(temp1[0])\n os.close(temp2[0])\n\n os.unlink(temp1[1])\n os.unlink(temp2[1])\n\n new_cert = open(filename, 'wb')\n new_cert.write('%s\\n%s' % (key, cert))\n\n new_cert.close()\n\n log.info('Successfully created %s' % filename)\n return True",
"def get_own_cert_path(self):\n# _log.debug(\"get_own_cert_path: node_name={}\".format(self.node_name))\n cert_dir = os.path.join(self.runtime_dir, \"mine\")\n return os.path.join(cert_dir, self.node_id+\".pem\")",
"def get_ssl_certificate() :",
"def CreateCrtFile(keyfile, csrfile):\n crtfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'x509',\n '-req',\n '-days', '1',\n '-in', csrfile,\n '-signkey', keyfile,\n '-out', crtfile\n ]\n _RunCommand(cmd)\n return crtfile",
"def create_cert(self, cert_file, key_file):\n if os.path.isfile(cert_file) and os.path.isfile(key_file):\n return cert_file, key_file\n\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 2048)\n cert = crypto.X509()\n cert.get_subject().C = \"US\"\n cert.get_subject().ST = \"CO\"\n cert.get_subject().L = \"Denver\"\n cert.get_subject().CN = gethostname()\n cert.get_subject().O = \"Metropolitan State University of Denver\"\n cert.get_subject().OU = \"Computer Science\"\n cert.set_serial_number(6)\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(365*24*60*60)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha1')\n\n open(join(cert_file), 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(join(key_file), \"w\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n return cert_file, key_file",
"def get_service_acct_pem_file(args):\n # Now that we have the email\n with tempfile.NamedTemporaryFile() as ptwelve:\n with tempfile.NamedTemporaryFile() as pem:\n subprocess.check_call([\n 'gcloud', 'iam', 'service-accounts', 'keys', 'create',\n ptwelve.name,\n '--key-file-type=p12',\n '--project', args.project,\n '--iam-account', args.service_account,\n ])\n subprocess.check_call([\n 'openssl', 'pkcs12',\n '-in', ptwelve.name,\n '-out', pem.name,\n '-nodes',\n '-passin', 'pass:notasecret',\n ])\n yield pem.name",
"def test_warn_self_signed(self):\n config_dir = self.mktemp()\n os.mkdir(config_dir)\n with open(os.path.join(config_dir, \"cert.pem\"), \"w\") as f:\n f.write(\n \"\"\"-----BEGIN CERTIFICATE-----\nMIID6DCCAtACAws9CjANBgkqhkiG9w0BAQUFADCBtzELMAkGA1UEBhMCVFIxDzAN\nBgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv\nY2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb\nQXV0b21hdGVkIFRlc3RpbmcgQXV0aG9yaXR5MSkwJwYJKoZIhvcNAQkBFhpzZWN1\ncml0eUB0d2lzdGVkbWF0cml4LmNvbTAgFw0xNzA3MTIxNDAxNTNaGA8yMTE3MDYx\nODE0MDE1M1owgbcxCzAJBgNVBAYTAlRSMQ8wDQYDVQQIDAbDh29ydW0xFDASBgNV\nBAcMC0JhxZ9tYWvDp8SxMRIwEAYDVQQDDAlsb2NhbGhvc3QxHDAaBgNVBAoME1R3\naXN0ZWQgTWF0cml4IExhYnMxJDAiBgNVBAsMG0F1dG9tYXRlZCBUZXN0aW5nIEF1\ndGhvcml0eTEpMCcGCSqGSIb3DQEJARYac2VjdXJpdHlAdHdpc3RlZG1hdHJpeC5j\nb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDwT6kbqtMUI0sMkx4h\nI+L780dA59KfksZCqJGmOsMD6hte9EguasfkZzvCF3dk3NhwCjFSOvKx6rCwiteo\nWtYkVfo+rSuVNmt7bEsOUDtuTcaxTzIFB+yHOYwAaoz3zQkyVW0c4pzioiLCGCmf\nFLdiDBQGGp74tb+7a0V6kC3vMLFoM3L6QWq5uYRB5+xLzlPJ734ltyvfZHL3Us6p\ncUbK+3WTWvb4ER0W2RqArAj6Bc/ERQKIAPFEiZi9bIYTwvBH27OKHRz+KoY/G8zY\n+l+WZoJqDhupRAQAuh7O7V/y6bSP+KNxJRie9QkZvw1PSaGSXtGJI3WWdO12/Ulg\nepJpAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAJXEq5P9xwvP9aDkXIqzcD0L8sf8\newlhlxTQdeqt2Nace0Yk18lIo2oj1t86Y8jNbpAnZJeI813Rr5M7FbHCXoRc/SZG\nI8OtG1xGwcok53lyDuuUUDexnK4O5BkjKiVlNPg4HPim5Kuj2hRNFfNt/F2BVIlj\niZupikC5MT1LQaRwidkSNxCku1TfAyueiBwhLnFwTmIGNnhuDCutEVAD9kFmcJN2\nSznugAcPk4doX2+rL+ila+ThqgPzIkwTUHtnmjI0TI6xsDUlXz5S3UyudrE2Qsfz\ns4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=\n-----END CERTIFICATE-----\"\"\"\n )\n\n config = {\n \"tls_certificate_path\": os.path.join(config_dir, \"cert.pem\"),\n \"tls_fingerprints\": [],\n }\n\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n t.read_certificate_from_disk(require_cert_and_key=False)\n\n warnings = self.flushWarnings()\n self.assertEqual(len(warnings), 1)\n self.assertEqual(\n warnings[0][\"message\"],\n (\n \"Self-signed TLS certificates will not be accepted by \"\n \"Synapse 1.0. Please either provide a valid certificate, \"\n \"or use Synapse's ACME support to provision one.\"\n ),\n )",
"def prepare_certificate_file(certificate: str) -> str:\n certificate_file = NamedTemporaryFile(delete=False)\n certificate_path = certificate_file.name\n certificate_file.write(bytes(certificate, 'utf-8'))\n certificate_file.close()\n demisto.debug('Successfully preparing the certificate')\n return certificate_path",
"def cert_file(self):\n return self._get('cert_file')",
"def opensslCmsDataCreate( conveyedInfoFile ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-data_create\", \"-in\", conveyedInfoFile,\n \"-outform\", \"der\" ]\n conveyedInfoCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsDerBase64",
"def create_ca_file(anchor_list, filename):\n try:\n f = open(filename, \"w\")\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n f.write(s)\n f.close()\n except:\n return None\n return filename",
"def opensslCmsSignedDataCreate( conveyedInfoFile, cert, privateKey ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-sign\", \"-in\", conveyedInfoFile,\n \"-signer\", cert,\n \"-inkey\", privateKey,\n \"-outform\", \"der\", \"-nodetach\" ]\n conveyedInfoCmsSignedDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsSignedDerBase64",
"def generate_certificates():\n print('GEN CERTS')\n domain = os.environ.get('SSL_DOMAIN_NAME', 'localhost')\n email = os.environ.get('SSL_EMAIL', '[email protected]')\n print(domain)\n cert_path = '/etc/letsencrypt/live/' + domain\n if domain == \"localhost\":\n print('GEN LOCALHOST SSL KEY')\n call(['mkdir', '-p', cert_path])\n cmd = [\n 'openssl',\n 'req',\n '-x509',\n '-newkey',\n 'rsa:4096',\n '-keyout',\n cert_path +\n '/privkey.pem',\n '-out',\n cert_path +\n '/cert.pem',\n '-days',\n '365',\n '-nodes',\n '-subj',\n '/CN=localhost']\n call(cmd)\n\n else:\n # files exist so renew\n if os.path.isfile(cert_path + '/cert.pem') and os.path.isfile(cert_path + \\\n '/fullchain.pem') and os.path.isfile(cert_path + '/privkey.pem'):\n print('RENEW CERTS')\n cmd = ['certbot', 'renew']\n print(cmd)\n call(cmd)\n\n else:\n print('GENERATE CERTS')\n cmd = [\n 'certbot',\n 'certonly',\n '-a',\n 'standalone',\n '--agree-tos',\n '-d',\n domain,\n '-m',\n email,\n ' --noninteractive']\n print(cmd)\n call(cmd)\n\n # use mosquitto conf template to rewrite mosquitto conf file including env\n # SSL_CERTIFICATES_FOLDER\n marker_replace_template(\n \"/etc/mosquitto/mosquitto-ssl-template.conf\",\n \"/etc/mosquitto/mosquitto-ssl.conf\",\n 'SSL_CERTIFICATE_FOLDER',\n cert_path)"
] | [
"0.66648823",
"0.6361533",
"0.62127554",
"0.6201686",
"0.61935896",
"0.6074794",
"0.6025097",
"0.5998469",
"0.5896556",
"0.5884485",
"0.5839449",
"0.5786212",
"0.5751437",
"0.57417154",
"0.5737081",
"0.57288885",
"0.57241213",
"0.5713234",
"0.56344074",
"0.55794954",
"0.55756265",
"0.554322",
"0.55171686",
"0.55126125",
"0.55041325",
"0.54810494",
"0.54577154",
"0.54352146",
"0.54300416",
"0.54191184"
] | 0.7334299 | 0 |
Transform from one radial coordinate to another. Note that this coordinate conversion is only strictly valid inside of the LCFS. | def rad_coord_transform(x,name_in,name_out, geqdsk):
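    # Recognized input coordinates: 'rhon', 'rhop', 'r_V', 'rhov', 'Rmid', 'rmid', 'roa'.
    # Recognized output coordinates additionally include 'psin'.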
if name_in == name_out:
return x
if 'r_V' not in geqdsk['fluxSurfaces']['geo']:
R0 = geqdsk['RMAXIS']
eq_vol = geqdsk['fluxSurfaces']['geo']['vol']
r_V = np.sqrt(eq_vol/(2*np.pi**2*R0))
geqdsk['fluxSurfaces']['geo']['r_V'] = r_V
# sqrt(norm. tor. flux)
rhon_ref = geqdsk['fluxSurfaces']['geo']['rhon']
# norm. pol. flux
psin_ref = geqdsk['fluxSurfaces']['geo']['psin']
# sqrt(norm. pol. flux)
rhop_ref = np.sqrt(psin_ref)
#volume radius
r_V = geqdsk['fluxSurfaces']['geo']['r_V']
# R at midplane
Rmid = geqdsk['fluxSurfaces']['midplane']['R']
# r at midplane
R0 = geqdsk['fluxSurfaces']['R0']
rmid = Rmid - R0
# Interpolate to transform coordiantes
if name_in == 'rhon':
coord_in = rhon_ref
elif name_in == 'rhop':
coord_in = rhop_ref
elif name_in == 'r_V':
coord_in = r_V
elif name_in == 'rhov':
r_V_lcfs = np.interp(1, rhon_ref, r_V)
coord_in = r_V/r_V_lcfs
elif name_in == 'Rmid':
coord_in = Rmid
elif name_in == 'rmid':
coord_in = rmid
elif name_in == 'roa':
rmid_lcfs = np.interp(1, rhon_ref, rmid)
coord_in = rmid/rmid_lcfs
else:
raise ValueError('Input coordinate was not recognized!')
if name_out == 'rhon':
coord_out = rhon_ref
elif name_out == 'psin':
coord_out = psin_ref
elif name_out == 'rhop':
coord_out = rhop_ref
elif name_out == 'r_V':
coord_out = r_V
elif name_out == 'rhov':
r_V_lcfs = np.interp(1, rhon_ref, r_V)
coord_out = r_V/r_V_lcfs
elif name_out == 'Rmid':
coord_out = Rmid
elif name_out == 'rmid':
coord_out = rmid
elif name_out == 'roa':
rmid_lcfs = np.interp(1, rhon_ref, rmid)
coord_out = rmid/rmid_lcfs
else:
raise ValueError('Output coordinate was not recognized!')
ind = coord_in != 0
#trick for better extrapolation
return np.interp(x,coord_in[ind],coord_out[ind]/coord_in[ind])*x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deg2rad_inplace(a):",
"def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * \\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p ** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y",
"def testRadial(self):\n radialClass = xyTransformRegistry[\"radial\"]\n radialConfig = radialClass.ConfigClass()\n radialConfig.coeffs = (0, 1.05, 0.1)\n with lsst.utils.tests.getTempFilePath(\".py\") as filePath:\n self.checkConfig(radialClass, radialConfig, filePath)\n radial = radialClass(radialConfig)\n self.assertEqual(type(radial), RadialXYTransform)\n self.assertEqual(len(radial.getCoeffs()), len(radialConfig.coeffs))\n for coeff, predCoeff in zip(radial.getCoeffs(), radialConfig.coeffs):\n self.assertAlmostEqual(coeff, predCoeff)\n self.checkBasics(radial)\n for fromPoint in self.fromIter():\n fromRadius = math.hypot(fromPoint[0], fromPoint[1])\n fromAngle = math.atan2(fromPoint[1], fromPoint[0])\n predToRadius = fromRadius * \\\n (radialConfig.coeffs[2] *\n fromRadius + radialConfig.coeffs[1])\n predToPoint = Point2D(\n predToRadius * math.cos(fromAngle),\n predToRadius * math.sin(fromAngle))\n toPoint = radial.forwardTransform(fromPoint)\n for i in range(2):\n self.assertAlmostEqual(toPoint[i], predToPoint[i])",
"def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)",
"def radial_transform(self, width):\n\n factor = width / 2. / sp.pi\n return lambda k_rad: sp.sinc(k_rad * factor)",
"def rad2deg_inplace(a):",
"def deg2rad(a):",
"def radial2(self) -> float:\n return self.distortion_coefficients[0]",
"def xy_to_r(corr_xy):\n step_x = corr_xy.X.iloc[0]\n step_y = corr_xy.Y.iloc[0]\n corr_r = corr_xy.assign(R = ((corr_xy.X-step_x)**2 + (corr_xy.Y-step_y)**2)**0.5) \n return corr_r",
"def _polar_to_cartesian(self, radius: float, radians: float) -> None:\n self.x = round(radius * math.cos(radians), EPSILON_EXP_MINUS_1)\n self.y = round(radius * math.sin(radians), EPSILON_EXP_MINUS_1)",
"def ra2xy(self, ra):\n return -math.sin(ra), math.cos(ra)",
"def transform_from_latlon(lat, lon):\n from affine import Affine\n lat = np.asarray(lat)\n lon = np.asarray(lon)\n trans = Affine.translation(lon[0], lat[0])\n scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])\n return trans * scale",
"def transform_to(self, coordinates) -> np.ndarray:\n coor = self.T @ (coordinates - self.O)\n r = (coor[0] ** 2 + coor[1] ** 2) ** 0.5\n phi = atan2(coor[1], coor[0])\n z = coor[2]\n return np.array([r, phi, z], dtype=float)",
"def composite(c, r):\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat",
"def undo_mercator_project(x,y):\n lon = y*np.pi\n ex = np.exp(4*np.pi*x)\n lat = np.arcsin((ex - 1)/(ex +1 ))\n lon = lon*360/2/np.pi\n lat = lat*360 /2/np.pi\n return lon, lat",
"def translate_polar(self, radius, angle): \n return Position.fromnp(translate_polar(self.tonp(), radius, angle))",
"def deg2rad(x):\r\n # see decorator for function body\r",
"def _translate_coordinate(self, x1, y1, x2, y2):\n\n return (x1 + x2, y1 + y2)",
"def normalize(coords, radius):\n co = coords.copy()\n # calculate current distance\n dist = (coords[0] ** 2 + coords[1] ** 2 + coords[2] ** 2) ** 0.5\n # normalize\n for axis in range(3):\n co[axis] = coords[axis] / dist * radius\n return co",
"def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta",
"def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec",
"def lon_lat_to_cartesian(lon, lat, R = 1):\n lon_r = np.radians(lon)\n lat_r = np.radians(lat)\n\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return x,y,z",
"def coords_to_residue(self, rnext: bool = ...) -> None:\n ...",
"def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y",
"def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)",
"def radialCopy(self, bufr, x=0, y=0, z=0, startRadius=2.0, endRadius=2.0,\r\n startAngle=0.0, endAngle=360.0, step=12):\r\n st = (endAngle - startAngle) / step\r\n rst = (endRadius - startRadius) / int(st)\r\n rd = startRadius\r\n sta = startAngle\r\n\r\n blist = []\r\n for r in range(int(st)):\r\n print(\"merging \", r)\r\n ca = math.cos(math.radians(sta))\r\n sa = math.sin(math.radians(sta))\r\n sta += step\r\n rd += rst\r\n blist.append([bufr, x + ca * rd, y, z + sa * rd,\r\n 0, sta, 0, 1.0, 1.0, 1.0])\r\n\r\n self.merge(blist)\r\n print(\"merged all\")",
"def map_aux(lat, long, direction, r1, r2):\r\n\r\n dlat, dlong = direction\r\n\r\n # Transform from spherical to Cartesian coordinates\r\n # lat = 0 on z axis\r\n x1 = r1*np.sin(lat)*np.cos(long)\r\n y1 = r1*np.sin(lat)*np.sin(long)\r\n z1 = r1*np.cos(lat)\r\n\r\n dx1 = np.sin(dlat)*np.cos(dlong)\r\n dy1 = np.sin(dlat)*np.sin(dlong)\r\n dz1 = np.cos(dlat)\r\n \r\n res_shape = x1.shape\r\n\r\n xdx = x1*dx1 + y1*dy1 + z1*dz1\r\n\r\n # D may be <0 in this case nan will be returned\r\n D = xdx*xdx - r1*r1 + r2*r2\r\n # select the minimum of the 2 roots\r\n l12 = np.array([-xdx + np.sqrt(D), -xdx - np.sqrt(D)]).reshape(2, -1)\r\n indxl = np.argmin(abs(l12), axis=0)\r\n indyl = np.indices(indxl.shape)[0]\r\n\r\n # l is how far to move in direction dx\r\n l = l12[indxl, indyl].reshape(res_shape)\r\n\r\n x2 = x1 + l*dx1\r\n y2 = y1 + l*dy1\r\n z2 = z1 + l*dz1\r\n\r\n map_lat = np.arccos(z2/r2)\r\n map_long = np.arctan2(y2, x2)\r\n\r\n try:\r\n return (map_lat.reshape(res_shape), map_long.reshape(res_shape))\r\n except AttributeError: # if lat and long scalars\r\n return (np.asscalar(map_lat), np.asscalar(map_long))",
"def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2",
"def to_crs(self, crs):\n if crs is None:\n raise ValueError(\"Can not transform with invalid crs\")\n if self.crs is None:\n raise ValueError(\"Can not transform geometries without crs. Set crs for this GeoSeries first.\")\n if self.crs == crs:\n return self\n return _unary_geo(arctern.ST_Transform, self, self.crs, crs, crs=crs)",
"def scalar_g2r(al, be, ga, lon, lat):\n\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n \n #rotate_matrix = np.linalg.pinv(rotate_matrix)\n \n lat = lat * rad\n lon = lon * rad\n\n # geographical Cartesian coordinates:\n xr = np.cos(lat) * np.cos(lon)\n yr = np.cos(lat) * np.sin(lon)\n zr = np.sin(lat)\n\n # rotated Cartesian coordinates:\n xg = rotate_matrix[0, 0] * xr + rotate_matrix[0, 1] * yr + rotate_matrix[0, 2] * zr\n yg = rotate_matrix[1, 0] * xr + rotate_matrix[1, 1] * yr + rotate_matrix[1, 2] * zr\n zg = rotate_matrix[2, 0] * xr + rotate_matrix[2, 1] * yr + rotate_matrix[2, 2] * zr\n\n # rotated coordinates:\n rlat = np.arcsin(zg)\n rlon = np.arctan2(yg, xg)\n\n a = np.where((np.abs(xg) + np.abs(yg)) == 0)\n if a:\n lon[a] = 0\n\n rlat = rlat / rad\n rlon = rlon / rad\n\n return (rlon, rlat)"
] | [
"0.6106413",
"0.5955033",
"0.5679703",
"0.5614722",
"0.56037164",
"0.55267453",
"0.54864424",
"0.5467077",
"0.5389721",
"0.5381061",
"0.5360404",
"0.535843",
"0.535422",
"0.5338711",
"0.53266436",
"0.5319456",
"0.53012145",
"0.52922064",
"0.52901214",
"0.52864665",
"0.52845246",
"0.52561957",
"0.5243935",
"0.5240043",
"0.5228705",
"0.52147096",
"0.5200916",
"0.52007467",
"0.5189235",
"0.5187686"
] | 0.6349656 | 0 |
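A short usage sketch of the mapping trick in rad_coord_transform above: two made-up monotonic radial grids stand in for the geqdsk profile arrays, and the ratio interpolation mirrors the function's final line.

```python
# Minimal sketch of the np.interp-based coordinate mapping used above.
# The radial grids are invented for illustration, not taken from a real equilibrium.
import numpy as np

rhop_ref = np.linspace(0.0, 1.0, 50)            # sqrt(normalized poloidal flux)
rhon_ref = np.sqrt(np.linspace(0.0, 1.0, 50))   # stand-in sqrt(normalized toroidal flux)

x = np.array([0.2, 0.5, 0.9])                   # points given in the rhop coordinate

# Same trick as the function: interpolate the ratio of the two grids and multiply
# back by x, which keeps the mapping well behaved near the axis where both are ~0.
ind = rhop_ref != 0
x_rhon = np.interp(x, rhop_ref[ind], rhon_ref[ind] / rhop_ref[ind]) * x
print(x_rhon)
```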
Transformation to apply on each notebook. You should return the modified (nb, resources) pair. If you wish to apply your transform on each cell, you might want to override the transform_cell method instead. | def call(self, nb, resources):
self.log.debug("Applying transform: %s", self.__class__.__name__)
try :
for worksheet in nb.worksheets:
for index, cell in enumerate(worksheet.cells):
worksheet.cells[index], resources = self.transform_cell(cell, resources, index)
return nb, resources
except NotImplementedError:
raise NotImplementedError('should be implemented by subclass') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform_cell(self, cell, resources, index):\n\n raise NotImplementedError('should be implemented by subclass')\n return cell, resources",
"def cell_preprocessor(function):\n\n @functools.wraps(function)\n def wrappedfunc(nb: NotebookNode, resources: dict) -> (NotebookNode, dict):\n new_nb = copy.deepcopy(nb)\n for index, cell in enumerate(new_nb.cells):\n new_nb.cells[index], resources = function(cell, resources, index)\n return new_nb, resources\n\n return wrappedfunc",
"def transform(self, images):\n\n from thunder.rdds.images import Images\n\n # broadcast the transformations\n bcTransformations = images.rdd.context.broadcast(self.transformations)\n\n # apply the transformations\n newrdd = images.rdd.map(lambda (k, im): (k, bcTransformations.value[k].apply(im)))\n return Images(newrdd).__finalize__(images)",
"def _cells(notebook):\n if notebook.nbformat < 4:\n for ws in notebook.worksheets:\n for cell in ws.cells:\n yield cell\n else:\n for cell in notebook.cells:\n yield cell",
"def preprocess(self, nb: NotebookNode, resources: ResourcesDict) -> Tuple[NotebookNode, ResourcesDict]:\n new_cells = []\n\n # header\n if self.header:\n with io.open(self.header, encoding='utf-8') as fh:\n header_nb = read_nb(fh, as_version=current_nbformat)\n new_cells.extend(header_nb.cells)\n\n # body\n new_cells.extend(nb.cells)\n\n # footer\n if self.footer:\n with io.open(self.footer, encoding='utf-8') as fh:\n footer_nb = read_nb(fh, as_version=current_nbformat)\n new_cells.extend(footer_nb.cells)\n\n nb.cells = new_cells\n super(IncludeHeaderFooter, self).preprocess(nb, resources)\n\n return nb, resources",
"def convertNotebook(fname, refNrPath):\n #tempdir is where I will save in between files\n try:\n os.mkdir('tempDir')\n except:\n pass\n # get list of backgrounds\n with open(refNrPath+\".pagedata\") as file:\n backgrounds = [line.strip() for line in file]\n\n bg_pg = 0\n bglist = []\n for bg in backgrounds:\n convertSvg2PdfCmd = \"\".join([\"rsvg-convert -f pdf -o \", \"tempDir/bg_\"\\\n + str(bg_pg) + \".pdf \", str(bgPath)\\\n + bg.replace(\" \", \"\\ \") + \".svg\"])\n os.system(convertSvg2PdfCmd)\n bglist.append(\"tempDir/bg_\"+str(bg_pg)+\".pdf \")\n bg_pg += 1\n merged_bg = \"tempDir/merged_bg.pdf\"\n os.system(\"convert \" + (\" \").join(bglist) + \" \" + merged_bg)\n # get info from the pdf we just made\n input1 = PdfFileReader(open(merged_bg, 'rb'))\n pdfsize = input1.getPage(0).mediaBox\n # find out the page hashes\n content = json.loads(open(refNrPath + \".content\").read())\n # Now convert all Pages\n pdflist = []\n for pg, pg_hash in enumerate(content['pages']):\n rmpath = refNrPath + \"/\" + pg_hash + \".rm\"\n # skip page if it doesnt extist anymore. This is fine in notebooks\n # because nobody cares about the rM numbering.\n try:\n rm2svg(rmpath, \"tempDir/temprm\" + str(pg) + \".svg\",\n coloured_annotations=True)\n convertSvg2PdfCmd = \\\n \"\".join([\"rsvg-convert -f pdf -o \", \"tempDir/temppdf\" + \\\n str(pg), \".pdf \", \"tempDir/temprm\" + str(pg) + \".svg\"])\n os.system(convertSvg2PdfCmd)\n pdflist.append(\"tempDir/temppdf\"+str(pg)+\".pdf\")\n except FileNotFoundError:\n continue\n # merge all annotation pages\n merged_rm = \"tempDir/merged_rm.pdf\"\n os.system(\"convert \" + (\" \").join(pdflist) + \" \" + merged_rm)\n # combine with background\n stampCmd = \"\".join([\"pdftk \", merged_bg, \" multistamp \", merged_rm, \\\n \" output \" + syncDirectory + \"/Notes/\" + fname + \".pdf\"])\n os.system(stampCmd)\n # Delete temp directory\n shutil.rmtree(\"tempDir\", ignore_errors=False, onerror=None)\n return True",
"def patch_notebooks(notebooks_dir):\n\n nb_convert_config = Config()\n nb_convert_config.NotebookExporter.preprocessors = [\"nbconvert.preprocessors.ClearOutputPreprocessor\"]\n output_remover = nbconvert.NotebookExporter(nb_convert_config)\n for notebookfile in Path(notebooks_dir).glob(\"**/*.ipynb\"):\n if (\n not str(notebookfile.name).startswith(\"test_\")\n and notebookfile.name not in EXCLUDED_NOTEBOOKS\n ):\n nb = nbformat.read(notebookfile, as_version=nbformat.NO_CONVERT)\n found = False\n for cell in nb[\"cells\"]:\n replace_dict = cell.get(\"metadata\", {}).get(\"test_replace\")\n if replace_dict is not None:\n found = True\n for source_value, target_value in replace_dict.items():\n if source_value not in cell[\"source\"]:\n raise ValueError(\n f\"Processing {notebookfile} failed: {source_value} does not exist in cell\"\n )\n cell[\"source\"] = cell[\"source\"].replace(\n source_value, target_value\n )\n cell[\"source\"] = \"# Modified for testing\\n\" + cell[\"source\"]\n print(\n f\"Processed {notebookfile}: {source_value} -> {target_value}\"\n )\n if not found:\n print(f\"No replacements found for {notebookfile}\")\n nb_without_out, _ = output_remover.from_notebook_node(nb)\n with notebookfile.with_name(f\"test_{notebookfile.name}\").open(\"w\", encoding=\"utf-8\") as out_file:\n out_file.write(nb_without_out)",
"def write_ipynb(self):\n for nb in self.notebooks:\n nbformat.write(nb.content, os.path.join(self.dst_dir, nb.filename))",
"def create_transforms(ntiles, solution):\n rtransforms = []\n for i in range(ntiles):\n rtransforms.append(renderapi.transform.AffineModel(\n B0=solution[0][i],\n B1=solution[1][i]))\n return rtransforms",
"def transform():",
"def execute_notebook(nb, resources):\n\n if is_ipython_3():\n from IPython.nbconvert.preprocessors import ExecutePreprocessor\n nb, resources = ExecutePreprocessor().preprocess(nb, resources)\n elif runipy_available:\n from runipy.notebook_runner import NotebookRunner\n r = NotebookRunner(nb)\n r.run_notebook(skip_exceptions=True)\n nb = r.nb\n else:\n raise ImportError(\"Can't execute notebooks. Please install IPython >= 3 or runipy.\")\n\n return nb",
"def transform(self):",
"def _transform(self, document):\n pass",
"def transform_nb(dirpath, src_fname, tg_fname):\n\n srcfile = os.path.join(dirpath, src_fname)\n tgfile = os.path.join(dirpath, tg_fname)\n\n with open(srcfile, 'r') as fin:\n with open(tgfile, 'w') as fout:\n\n state = True\n skip_next = False\n\n for line in fin:\n\n if state:\n\n if '<FILL IN>' in line:\n skip_next = True\n fout.write(line)\n else:\n if skip_next:\n # This line is ignored, because the above line\n # contains a <FILL IN>\n skip_next = False\n if not line.endswith(',\\n'):\n # This is to avoid problems when the line to\n # remove is the last line in its cell\n fout.write('\" \"\\n')\n else:\n fout.write(line)\n\n if '<SOL>' in line:\n state = False\n else:\n if '</SOL>' in line:\n fout.write('\\n' + line)\n state = True\n\n # Insert backslash in spaces. This is to avoid error in the interpretation\n # of spaces (when they are part of the file name) in os commands.\n f_src = srcfile.replace(' ', '\\\\ ')\n os.system('jupyter nbconvert --to html ' + f_src + ' --output '\n + src_fname.replace('.ipynb', '.html'))\n\n # Clean student version\n f_tg = tgfile.replace(' ', '\\\\ ')\n os.system('jupyter nbconvert --ClearOutputPreprocessor.enabled=True '\n + '--inplace ' + f_tg)\n\n os.system(f'jupyter nbconvert --to slides {f_src}')\n os.system(f'jupyter nbconvert --to pdf {f_src} --output '\n + src_fname.replace('.ipynb', '.pdf'))\n\n return",
"def preprocess_cell(self, cell, resources, index):\n\n if 'source' in cell and cell.cell_type == \"markdown\":\n # Google Translate API does not preserve newline symbol and \n # leading spaces (useful to keep nested lists)\n lines = cell.source.split('\\n')\n translated_lines = [' ' * (len(line) - len(line.lstrip(' '))) +\n self.translate_client.translate(line, target_language='ru')['translatedText']\n for line in lines]\n translation = '\\n'.join(translated_lines)\n # Google Translate adds a space between ] and ( and after some / in URLs\n cell.source = translation.replace('] (', '](').replace('/ ', '/')\n\n return cell, resources",
"def transform():\n pass",
"def get_cells(self, tag):\n cells = []\n for nb in self.notebooks:\n cells.extend(nb.get_cells(tag))\n nb = new_notebook(cells=cells)\n nb[\"metadata\"][\"kernelspec\"] = {\"name\": \"python3\"}\n return nbformat.writes(nb)",
"def batch_transform(func):\n\n @functools.wraps(func)\n def create_window(*args, **kwargs):\n # passes the user defined function to BatchTransform which it\n # will call instead of self.get_value()\n return BatchTransform(*args, func=func, **kwargs)\n\n return create_window",
"def _parallel_transform(self, **kwargs):\n self.clean_input_references()\n data = np.asarray([self.x, self.y]).swapaxes(0, 1)\n data = self.skip_already_proccessed_in_label_file(data)\n # define per-item callable to be processed\n process_element = self.process_element(\n feature_name=self.feature_name,\n new_labels=self.new_labels,\n out_path=self.out_path,\n source_path=self.source_path,\n raw_path=self.raw_path,\n features_path=self.feature_path,\n existing_labels=self.existing_labels,\n **kwargs)\n try:\n with concurrent.futures.ThreadPoolExecutor(max_workers=FEATURE_EXTRACTOR_NUM_WORKERS) as executor:\n iterator = executor.map(process_element, data)\n list(iterator)\n except KeyboardInterrupt:\n print('KeyboardInterrupt catched')\n except Exception as e:\n print('error: in tranform')\n print(e)\n self.export_new_labels()\n raise e\n finally:\n print('info: exporting extraction meta-data')\n self.export_new_labels()\n return np.asarray(self.new_labels)",
"def transform(self, X, copy=...):\n ...",
"def transform(self, X, copy=...):\n ...",
"def transform(self, X, copy=...):\n ...",
"def collect(self):\n with self.fspath.open() as f:\n self.nb = reads(f.read(), 4)\n\n # Start the cell count\n cell_num = 0\n\n # Iterate over the cells in the notebook\n for cell in self.nb.cells:\n # Skip the cells that have text, headings or related stuff\n # Only test code cells\n if cell.cell_type == 'code':\n\n # If a cell starts with the comment string\n # PYTEST_VALIDATE_IGNORE_OUTPUT then test that the cell\n # executes without fail but do not compare the outputs.\n if (cell.source.startswith(r'# PYTEST_VALIDATE_IGNORE_OUTPUT') or\n cell.source.startswith(r'#PYTEST_VALIDATE_IGNORE_OUTPUT')):\n yield IPyNbCell('Cell ' + str(cell_num), self, cell_num,\n cell, docompare=False)\n\n # otherwise yield a full test (the normal case)\n else:\n yield IPyNbCell('Cell ' + str(cell_num), self, cell_num, cell)\n\n # Update 'code' cell count\n cell_num += 1",
"def main_convert(args):\n try:\n file_path = args.file_name # os.path.join(static_path, args.file_name)\n if args.slides:\n config_path = os.path.join(static_path, \"config\", \"slides_config.py\")\n output = subprocess.check_output(\n [\n \"jupyter\",\n \"nbconvert\",\n file_path,\n \"--to\",\n \"slides\",\n \"--CSSHTMLHeaderPreprocessor.style=colorful\",\n \"--reveal-prefix\",\n args.reveal_prefix,\n \"--config\",\n config_path,\n ],\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n else:\n config_path = os.path.join(static_path, \"config\", \"nb_config.py\")\n output = subprocess.check_output(\n [\n \"jupyter\",\n \"nbconvert\",\n file_path,\n \"--to\",\n \"html\",\n \"--config\",\n config_path,\n ],\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n print(output.rstrip())\n _name = get_out_name(args)\n # _name = output.split(\" \")[-1].rstrip()\n if args.c:\n with open(_name, \"r\") as f:\n clean_file = clean_html(f.read())\n with open(_name, \"w\") as f:\n f.write(clean_file)\n if args.bib_name is not None:\n add_ref(_name, args.bib_name, keep_label=args.l, slides=args.slides)\n else:\n with open(_name, \"r\") as f:\n clean_file = clean_html_refs(clean_file)\n with open(_name, \"w\") as f:\n f.write(clean_file)\n except IndexError:\n print(\"Provide the name of the notebook.\")",
"def convert_notebook(all_flag, overwrite_flag, filepath):\n context = load_context(Path.cwd())\n\n if not filepath and not all_flag:\n secho(\n \"Please specify a notebook filepath \"\n \"or add '--all' to convert all notebooks.\"\n )\n sys.exit(1)\n\n kedro_project_path = context.project_path\n kedro_package_name = \"za_covid_map\"\n\n if all_flag:\n # pathlib glob does not ignore hidden directories,\n # whereas Python glob does, which is more useful in\n # ensuring checkpoints will not be included\n pattern = kedro_project_path / \"**\" / \"*.ipynb\"\n notebooks = sorted(Path(p) for p in iglob(str(pattern), recursive=True))\n else:\n notebooks = [Path(f) for f in filepath]\n\n counter = Counter(n.stem for n in notebooks)\n non_unique_names = [name for name, counts in counter.items() if counts > 1]\n if non_unique_names:\n raise KedroCliError(\n \"Found non-unique notebook names! \"\n \"Please rename the following: {}\".format(\", \".join(non_unique_names))\n )\n\n for notebook in notebooks:\n secho(\"Converting notebook '{}'...\".format(str(notebook)))\n output_path = (\n kedro_project_path\n / \"src\"\n / kedro_package_name\n / \"nodes\"\n / \"{}.py\".format(notebook.stem)\n )\n\n if output_path.is_file():\n overwrite = overwrite_flag or click.confirm(\n \"Output file {} already exists. Overwrite?\".format(str(output_path)),\n default=False,\n )\n if overwrite:\n export_nodes(notebook, output_path)\n else:\n export_nodes(notebook, output_path)\n\n secho(\"Done!\")",
"def _body(self, x, ensembled_batch, non_ensembled_batch, idx):\n i, current_representations = x\n del x\n feats = self._slice_batch(i, ensembled_batch, non_ensembled_batch)\n representations_update = self.evoformer(*self.batch_expand(feats))\n new_representations = {}\n for k in current_representations:\n new_representations[k] = (\n current_representations[k] + representations_update[k])\n del representations_update\n return i+1, new_representations",
"def transform(self) -> DataFrame:\n _check_model(self.model_df)\n\n transform_key_pattern = ['header_block', 'sample_block']\n\n if 'label' in self.block_df.columns:\n transform_key_pattern.append('label')\n joined = self.block_df.drop('sort_key') \\\n .join(self.model_df, ['header_block', 'sample_block', 'header'], 'right') \\\n .withColumn('label', f.coalesce(f.col('label'), f.col('labels').getItem(0)))\n else:\n joined = self.block_df.drop('sort_key') \\\n .join(self.model_df, ['header_block', 'sample_block', 'header'], 'right')\n\n transform_udf = pandas_udf(\n lambda key, pdf: apply_model(key, transform_key_pattern, pdf, self._std_label_df, self.\n sample_blocks, self._alphas, self._std_cov_df),\n reduced_matrix_struct, PandasUDFType.GROUPED_MAP)\n\n record_hls_event('wgrRidgeReduceTransform')\n\n self.reduced_block_df = joined.groupBy(transform_key_pattern).apply(transform_udf)\n\n return self.reduced_block_df",
"def _render_notebook_from_source(\n self, ipynb_source: str, indices: Any | None = None,\n clear_output: bool | None = False,\n clear_markdown: bool | None = False, **kwargs: Any) -> str:\n import nbformat\n from nbformat.reader import parse_json\n nb_source_dict = parse_json(ipynb_source)\n\n if indices:\n nb_source_dict.update(\n {\"cells\": [nb_source_dict[\"cells\"][idx] for idx in indices]})\n\n if clear_markdown:\n nb_source_dict.update(\n {\"cells\": [cell for cell in nb_source_dict[\"cells\"]\n if cell[\"cell_type\"] != \"markdown\"]})\n\n nb_source_dict.update({\"cells\": nb_source_dict[\"cells\"]})\n\n import json\n ipynb_source = json.dumps(nb_source_dict)\n notebook = nbformat.reads(ipynb_source, as_version=4)\n\n from traitlets.config import Config\n c = Config()\n\n # This is to prevent execution of arbitrary code from note book\n c.ExecutePreprocessor.enabled = False\n if clear_output:\n c.ClearOutputPreprocessor.enabled = True\n\n c.CSSHTMLHeaderPreprocessor.enabled = False\n c.HighlightMagicsPreprocessor.enabled = False\n\n import os\n\n # Place the template in course template dir\n import course\n template_path = os.path.join(\n os.path.dirname(course.__file__),\n \"templates\", \"course\", \"jinja2\")\n c.TemplateExporter.template_path.append(template_path)\n\n from nbconvert import HTMLExporter\n html_exporter = HTMLExporter(\n config=c,\n template_file=\"nbconvert_template.tpl\"\n )\n\n (body, resources) = html_exporter.from_notebook_node(notebook)\n\n return \"<div class='relate-notebook-container'>%s</div>\" % body",
"def transform_blocks_observation(observation, scale, translation):\n transform = lambda b: transform_block(b, scale, translation)\n return BlocksObservation(\n [transform(b) for b in observation.blocks],\n [transform(b) for b in observation.obstacles],\n [transform(b) for b in observation.targets],\n [transform(b) for b in observation.balls])",
"def replace_plugins_with_calls(nb):\n for cell in nb['cells']:\n cell['source'] = '\\n'.join(replace_plugins(get_source(cell)))\n \n return nb"
] | [
"0.585268",
"0.57084334",
"0.56029147",
"0.5561479",
"0.5502522",
"0.5467713",
"0.5347716",
"0.52935684",
"0.52670693",
"0.5254548",
"0.5215245",
"0.52104896",
"0.51687974",
"0.5122487",
"0.50810003",
"0.50767034",
"0.50586015",
"0.5045944",
"0.4984737",
"0.49372816",
"0.49372816",
"0.49372816",
"0.49370518",
"0.49308148",
"0.49209017",
"0.49099264",
"0.49050292",
"0.49034077",
"0.48826587",
"0.48732734"
] | 0.76527107 | 0 |
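A minimal sketch of the same cell-iteration pattern against a modern nbformat v4 notebook, where cells live directly on nb.cells rather than in per-worksheet lists; the whitespace-stripping transform is only an illustration.

```python
# Minimal sketch of the cell-iteration pattern above with an nbformat v4 notebook.
from nbformat.v4 import new_code_cell, new_markdown_cell, new_notebook

def apply_to_cells(nb, transform_cell):
    """Call transform_cell(cell, resources, index) on every cell, like call() above."""
    resources = {}
    for index, cell in enumerate(nb.cells):
        nb.cells[index], resources = transform_cell(cell, resources, index)
    return nb, resources

def strip_trailing_whitespace(cell, resources, index):
    cell.source = "\n".join(line.rstrip() for line in cell.source.splitlines())
    return cell, resources

nb = new_notebook(cells=[new_markdown_cell("# Title  "), new_code_cell("print('hi')   ")])
nb, _ = apply_to_cells(nb, strip_trailing_whitespace)
print(repr(nb.cells[0].source))  # '# Title' with the trailing spaces removed
```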
Override this if you want to apply a transformation on each cell. You should return the modified cell and the resources dictionary. | def transform_cell(self, cell, resources, index):
raise NotImplementedError('should be implemented by subclass')
return cell, resources | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call(self, nb, resources):\n self.log.debug(\"Applying transform: %s\", self.__class__.__name__)\n try :\n for worksheet in nb.worksheets:\n for index, cell in enumerate(worksheet.cells):\n worksheet.cells[index], resources = self.transform_cell(cell, resources, index)\n return nb, resources\n except NotImplementedError:\n raise NotImplementedError('should be implemented by subclass')",
"def transform():",
"def transform(self):",
"def _apply_transform(self):\n pass",
"def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)",
"def apply(self, transform_func):\n #input_shapes = transform_func.input_shapes\n #input_types = transform_func.input_types\n #data_shapes = transform_func.data_shapes\n #data_types = transform_func.data_types\n #assert input_shapes == self._data_shapes\n #assert input_types = self._data_types\n ret_gen = transform_func(self.generator)\n ret = type(self).from_generator_func(ret_gen)\n if self.name is not None:\n ret.name = self.name\n #ret.data_shapes = data_shapes\n #ret.data_types = data_types\n return ret",
"def transform(self, X, copy=...):\n ...",
"def transform(self, X, copy=...):\n ...",
"def transform(self, X, copy=...):\n ...",
"def transform():\n pass",
"def transform(self, X, y=None):\n X = X.copy()\n if isinstance(self.transformation, BaseEstimator):\n X[self.columns_to_transform_] = self.transformation.transform(\n X[self.columns_to_transform_]\n )\n else:\n X[self.columns_to_transform_] = X[self.columns_to_transform_].applymap(\n self.transformation\n )\n\n return X",
"def _transform(self, document):\n pass",
"def preprocess_cell(self, cell, resources, index):\n\n if 'source' in cell and cell.cell_type == \"markdown\":\n # Google Translate API does not preserve newline symbol and \n # leading spaces (useful to keep nested lists)\n lines = cell.source.split('\\n')\n translated_lines = [' ' * (len(line) - len(line.lstrip(' '))) +\n self.translate_client.translate(line, target_language='ru')['translatedText']\n for line in lines]\n translation = '\\n'.join(translated_lines)\n # Google Translate adds a space between ] and ( and after some / in URLs\n cell.source = translation.replace('] (', '](').replace('/ ', '/')\n\n return cell, resources",
"def applyMapping(self):\n pass",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def apply_transformations(rdd):\n ### BEGIN SOLUTION ###\n return rdd",
"def _transform(self, dataset):\n raise NotImplementedError()",
"def cell_preprocessor(function):\n\n @functools.wraps(function)\n def wrappedfunc(nb: NotebookNode, resources: dict) -> (NotebookNode, dict):\n new_nb = copy.deepcopy(nb)\n for index, cell in enumerate(new_nb.cells):\n new_nb.cells[index], resources = function(cell, resources, index)\n return new_nb, resources\n\n return wrappedfunc",
"def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict",
"def transform(self, data):",
"def transform(self, X):\n\n X = super().transform(X)\n\n X[self.columns] = self.value\n\n return X",
"def apply(self):\n if self.applied:\n raise RuntimeError(\"Transform applied more than once\")\n \n self._apply()\n \n self.applied = True\n \n return self.template",
"def _apply_transform(self, w2w_transform):\n raise NotImplementedError",
"def transform(self, images):\n\n from thunder.rdds.images import Images\n\n # broadcast the transformations\n bcTransformations = images.rdd.context.broadcast(self.transformations)\n\n # apply the transformations\n newrdd = images.rdd.map(lambda (k, im): (k, bcTransformations.value[k].apply(im)))\n return Images(newrdd).__finalize__(images)"
] | [
"0.65440035",
"0.5804093",
"0.57728004",
"0.57344264",
"0.5611276",
"0.5597317",
"0.5579851",
"0.5579851",
"0.5579851",
"0.55642366",
"0.55408597",
"0.54695153",
"0.5432807",
"0.5426063",
"0.53927785",
"0.53927785",
"0.53927785",
"0.53927785",
"0.53927785",
"0.53927785",
"0.53927785",
"0.5383675",
"0.53744787",
"0.53618735",
"0.5352578",
"0.5343396",
"0.5327742",
"0.53055006",
"0.5279877",
"0.52662474"
] | 0.70315164 | 0 |
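A hypothetical subclass sketch of the override described above; the small Transformer stand-in below only reproduces the call() loop from the previous record so the example runs on its own.

```python
# Hypothetical sketch of overriding transform_cell; the base class is a stand-in.
class Transformer:
    """Minimal stand-in reproducing the call() loop from the previous record."""

    def call(self, nb, resources):
        for worksheet in nb.worksheets:
            for index, cell in enumerate(worksheet.cells):
                worksheet.cells[index], resources = self.transform_cell(cell, resources, index)
        return nb, resources

class ClearCodeOutputs(Transformer):
    def transform_cell(self, cell, resources, index):
        if cell.get("cell_type") == "code":
            cell["outputs"] = []  # drop stored outputs, keep the source
        return cell, resources
```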
Publish flow to OpenML server. Returns | def publish(self):
xml_description = self._generate_flow_xml()
file_elements = {'description': xml_description}
return_code, return_value = _perform_api_call(
"flow/", file_elements=file_elements)
self.flow_id = int(xmltodict.parse(return_value)['oml:upload_flow']['oml:id'])
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def publish(self):\n return",
"def publish():\n pass",
"def publish(self, waypoints): \n lane = Lane()\n lane.header.frame_id = '/world'\n lane.header.stamp = rospy.Time(0)\n lane.waypoints = waypoints\n self.final_waypoints_pub.publish(lane)",
"def _generate_flow_xml(self):\n model = self.model\n\n flow_dict = OrderedDict()\n flow_dict['oml:flow'] = OrderedDict()\n flow_dict['oml:flow']['@xmlns:oml'] = 'http://openml.org/openml'\n flow_dict['oml:flow']['oml:name'] = self._get_name()\n flow_dict['oml:flow']['oml:external_version'] = self.external_version\n flow_dict['oml:flow']['oml:description'] = self.description\n\n clf_params = model.get_params()\n flow_parameters = []\n for k, v in clf_params.items():\n # data_type, default_value, description, recommendedRange\n # type = v.__class__.__name__ Not using this because it doesn't conform standards\n # eg. int instead of integer\n param_dict = {'oml:name': k}\n flow_parameters.append(param_dict)\n\n flow_dict['oml:flow']['oml:parameter'] = flow_parameters\n\n flow_xml = xmltodict.unparse(flow_dict, pretty=True)\n\n # A flow may not be uploaded with the encoding specification..\n flow_xml = flow_xml.split('\\n', 1)[-1]\n return flow_xml",
"def on_publish_edge(self):\n logging.debug(\"Edge data published\")",
"async def publish(self, body, routing_key=None):\n pass # pragma: no cover",
"def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)",
"def publish_action(self, action):\n raise NotImplementedError",
"def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )",
"def publishOGC(self):\n\n\tif request.method != 'POST' or not request.is_xhr:\n\t return {\n 'success': False,\n 'message': toolkit._(\"Bad request - JSON Error: No request body data\")\n }\n\n\tcontext = {'model': model, 'session': model.Session,\n\t\t'user': c.user or c.author, 'auth_user_obj': c.userobj}\n\n\tdata = clean_dict(unflatten(tuplize_dict(parse_params(request.params))))\n\n\tresult = {'success': False,\n 'message': toolkit._(\"Not enough information to publish this resource.\")\n }\n\n\tresource_id = data.get(\"resource_id\", None)\n \tusername = context.get(\"user\", None)\n \tpackage_id = data.get(\"package_id\", None)\n \tlat_field = data.get(\"geoserver_lat_field\", None)\n \tlng_field = data.get(\"geoserver_lng_field\", None)\n\tstate = data.get(\"geoserver_state_field\", None)\n\n\t#get layer from package\n\ttry:\n\t md_package = None\n\t pkg = toolkit.get_action('package_show')(context, {'id': package_id})\n\t extras = pkg.get('extras', [])\n\n for extra in extras:\n key = extra.get('key', None)\n if key == 'md_package':\n md_package = json.loads(extra.get('value'))\n break\n\n\t resourceDescription = md_package.get('resourceDescription', {})\n\t layer = resourceDescription.get('usginContentModelLayer', resource_id)\n\t version = resourceDescription.get('usginContentModelVersion', None)\n\n # handle harvested datasets that do not have a md_package\n \n if layer == resource_id and version == None:\n usgin_tag = []\n\n for tag in pkg['tags']:\n if tag['name'].startswith('usgincm:'):\n usgin_tag.append(tag['name']) \n\n for key,value in (get_meta_action.get_usgin_prefix()).iteritems():\n if reduce(lambda v1,v2: v1 or v2, map(lambda v: v in usgin_tag, value)):\n key_arr = key.split(\"+\")\n break\n\n layer = key_arr[1]\n version = key_arr[2] \n \n\texcept:\n\t return result\n\n\tlayer_name = data.get(\"layer_name\", layer)\n\tworkspace_name = state+''+layer_name\n\n\tif None in [resource_id, layer_name, username, package_id, version, state]:\n\t return result\n\n\ttry:\n\t result = toolkit.get_action('geoserver_publish_ogc')(context, {'package_id': package_id, 'resource_id': resource_id, 'workspace_name': workspace_name, 'layer_name': layer_name, 'username': username, 'col_latitude': lat_field, 'col_longitude': lng_field, 'layer_version': version})\n\texcept:\n\t return {\n 'success': False,\n 'message': toolkit._(\"An error occured while processing your request, please contact your administrator.\")\n }\n\n\treturn result",
"def http_connect(self, flow: mitmproxy.http.HTTPFlow):",
"def test_stream_publish(self):\n pass",
"def fast_publish(self, request):\n self.__connection.fast_publish(request)",
"def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass",
"def publish(self, payload, **kwargs):\n if self.opts.get(\"ipc_mode\", \"\") == \"tcp\":\n pull_uri = int(self.opts.get(\"tcp_master_publish_pull\", 4514))\n else:\n pull_uri = os.path.join(self.opts[\"sock_dir\"], \"publish_pull.ipc\")\n if not self.pub_sock:\n self.pub_sock = salt.utils.asynchronous.SyncWrapper(\n salt.transport.ipc.IPCMessageClient,\n (pull_uri,),\n loop_kwarg=\"io_loop\",\n )\n self.pub_sock.connect()\n self.pub_sock.send(payload)",
"def publish(self):\n if not self.parent_node.is_job:\n return\n\n self.winstance.send_event('Publishing job outputs..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.publish',\n kwargs={\"name\": self.name})\n result.task.wait_for_terminated()\n if result.task.get_state() != tasks.TASK_FAILED:\n self.winstance.send_event('..outputs sent for publication')\n\n return result.task",
"def publish(self, block, event_type, event_data):\n raise NotImplementedError(\"Runtime needs to provide publish()\")",
"def request(self, flow: mitmproxy.http.HTTPFlow):",
"def request(self, flow: mitmproxy.http.HTTPFlow):",
"def _publish_model(self):\n # Check if already published\n if self.model_published:\n return\n\n # Trace CPO model if required\n ctx = self.context\n lout = ctx.get_log_output()\n if lout and ctx.solver.trace_cpo:\n stime = time.time()\n lout.write(\"Model '\" + str(self.model.get_name()) + \"' in CPO format:\\n\")\n lout.write(self.cpostr)\n lout.write(\"\\n\")\n self.model.write_information(lout)\n lout.write(\"\\n\")\n lout.flush()\n self.process_infos.incr(CpoProcessInfos.MODEL_DUMP_TIME, time.time() - stime)\n\n # Dump in dump directory if required\n if ctx.model.dump_directory:\n stime = time.time()\n make_directories(ctx.model.dump_directory)\n mname = self.model.get_name()\n if mname is None:\n mname = \"Anonymous\"\n else:\n # Remove special characters introduced by Jupyter\n mname = mname.replace('<', '').replace('>', '')\n file = ctx.model.dump_directory + \"/\" + mname + \".cpo\"\n with utils.open_utf8(file, 'w') as f:\n f.write(self.cpostr)\n self.process_infos.incr(CpoProcessInfos.MODEL_DUMP_TIME, time.time() - stime)\n\n # Set published indicator\n self.model_published = True",
"def publish(self, message: str) -> None:",
"def on_publish(unused_client, unused_userdata, unused_mid):\n\tprint('on_publish')",
"def request(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def publish(self):\n data = self.read_all_values()\n logger.info(data)\n if self.mqtt:\n self.mqtt.publish_json(data)",
"def on_publish(client, userdata, mid):\n print('on_publish')\n print(\" userdata:\" + str(userdata))\n print(\" mid:\" + str(mid))\n print()",
"def publish(self, settings, item):\n\n publisher = self.parent\n engine = publisher.engine\n document = item.properties[\"document\"]\n\n path = _document_path(document)\n item.properties[\"upload_path\"] = path\n item\n psdProject = PSDImage.open(path)\n\n #save layers to link and create new task to do so\n for layer in psdProject:\n layer.compose().save(layer.name+'.tiff')\n self.logger.info(\"Saved Layer {layerName}.psd\".format(layerName=layer.name))\n publish = sgtk.util.register_publish(publisher.sgtk,\n item.context,\n os.path.join(os.path.dirname(path),layer.name+'.tiff'),\n layer.name,\n version_number=None,\n published_file_type=\"Rendered Image\")",
"def publish(self, kpi_dict):\n pass",
"def test_workflows_change_stream_post(self):\n pass",
"def on_publish(client, userdata, mid):\n print(\"Message Published.\")",
"def publish(self):\n self.published = True\n self.save()# pylint: disable=no-member"
] | [
"0.637059",
"0.6307481",
"0.5872563",
"0.5749436",
"0.56772095",
"0.56606215",
"0.5617935",
"0.55418754",
"0.552572",
"0.5460382",
"0.54280865",
"0.54070175",
"0.53559107",
"0.5355898",
"0.5346886",
"0.53458697",
"0.5340997",
"0.53381914",
"0.53381914",
"0.5327639",
"0.52955705",
"0.52857363",
"0.528472",
"0.5253107",
"0.5250848",
"0.52411133",
"0.5219684",
"0.5217716",
"0.5209242",
"0.52067924"
] | 0.7366736 | 0 |
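The flow id in publish() above is pulled straight out of the server's XML reply; a minimal sketch of that parsing step with a made-up response body:

```python
# Minimal sketch of extracting the uploaded flow id, as publish() does above.
# The response body is made up; a real OpenML server supplies the actual id.
import xmltodict

return_value = (
    '<oml:upload_flow xmlns:oml="http://openml.org/openml">'
    "<oml:id>4242</oml:id>"
    "</oml:upload_flow>"
)
flow_id = int(xmltodict.parse(return_value)["oml:upload_flow"]["oml:id"])
print(flow_id)  # 4242
```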
Checks if a flow exists for the given model and possibly creates it. If the given flow exists on the server, the flowid will simply be returned. Otherwise it will be uploaded to the server. Returns | def _ensure_flow_exists(self):
import sklearn
flow_version = 'sklearn_' + sklearn.__version__
_, _, flow_id = _check_flow_exists(self._get_name(), flow_version)
# TODO add numpy and scipy version!
if int(flow_id) == -1:
return_code, response_xml = self.publish()
response_dict = xmltodict.parse(response_xml)
flow_id = response_dict['oml:upload_flow']['oml:id']
return int(flow_id)
return int(flow_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def create_flow(self, flow: \"FlowObject\") -> UUID:\n return await self.create_flow_from_name(flow.name)",
"async def create_flow_from_name(self, flow_name: str) -> UUID:\n flow_data = FlowCreate(name=flow_name)\n response = await self._client.post(\n \"/flows/\", json=flow_data.dict(json_compatible=True)\n )\n\n flow_id = response.json().get(\"id\")\n if not flow_id:\n raise httpx.RequestError(f\"Malformed response: {response}\")\n\n # Return the id of the created flow\n return UUID(flow_id)",
"def flow_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"flow_id\")",
"def _add_flow_cell_to_status_db(\n self, cgstats_flow_cell: StatsFlowcell, flow_cell: Optional[Flowcell], flow_cell_id: str\n ) -> Flowcell:\n if not flow_cell:\n flow_cell: Flowcell = self.db.add_flow_cell(\n flow_cell_name=flow_cell_id,\n sequencer_name=cgstats_flow_cell.sequencer,\n sequencer_type=cgstats_flow_cell.sequencer_type,\n date=cgstats_flow_cell.date,\n flow_cell_status=FlowCellStatus.ON_DISK,\n )\n return flow_cell",
"def del_flow(self, flow_name):\n\n try:\n of_response = requests.delete(self.url + \"restconf/config/opendaylight-inventory:nodes/node/\" + self.id +\n \"/table/0/flow/\" + flow_name, headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"del_flow \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)\n self.logger.debug(\"del_flow OK \" + error_text)\n return None\n except requests.exceptions.RequestException as e:\n # raise an exception in case of contection error\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"del_flow \" + error_text)\n raise OpenflowConnConnectionException(error_text)",
"def get_flow(self, flow_name: str) -> \"Flow\":\n if flow_name not in self.flows:\n raise ValueError(\"Flow is not contained in this Storage\")\n\n req_function = self._method_to_function[self.get_flow_request_http_method]\n\n get_flow_request_kwargs = _render_dict(self.get_flow_request_kwargs)\n\n response = req_function(**get_flow_request_kwargs) # type: ignore\n response.raise_for_status()\n\n if self.stored_as_script:\n flow_script_content = response.content.decode(\"utf-8\")\n return extract_flow_from_file(\n file_contents=flow_script_content, flow_name=flow_name\n )\n\n return flow_from_bytes_pickle(response.content)",
"def UpdateFlow(self,\n client_id,\n flow_id,\n flow_obj=db.Database.unchanged,\n flow_state=db.Database.unchanged,\n client_crash_info=db.Database.unchanged,\n processing_on=db.Database.unchanged,\n processing_since=db.Database.unchanged,\n processing_deadline=db.Database.unchanged):\n\n try:\n flow = self.flows[(client_id, flow_id)]\n except KeyError:\n raise db.UnknownFlowError(client_id, flow_id)\n\n if flow_obj != db.Database.unchanged:\n new_flow = flow_obj.Copy()\n\n # Some fields cannot be updated.\n new_flow.client_id = flow.client_id\n new_flow.flow_id = flow.flow_id\n new_flow.long_flow_id = flow.long_flow_id\n new_flow.parent_flow_id = flow.parent_flow_id\n new_flow.parent_hunt_id = flow.parent_hunt_id\n new_flow.flow_class_name = flow.flow_class_name\n new_flow.creator = flow.creator\n\n self.flows[(client_id, flow_id)] = new_flow\n flow = new_flow\n\n if flow_state != db.Database.unchanged:\n flow.flow_state = flow_state\n if client_crash_info != db.Database.unchanged:\n flow.client_crash_info = client_crash_info\n if processing_on != db.Database.unchanged:\n flow.processing_on = processing_on\n if processing_since != db.Database.unchanged:\n flow.processing_since = processing_since\n if processing_deadline != db.Database.unchanged:\n flow.processing_deadline = processing_deadline\n flow.last_update_time = rdfvalue.RDFDatetime.Now()",
"def run_flow(flow_id):\n if flow_id == 1:\n etl.load_images_from_urls()\n elif flow_id == 2:\n etl.find_edges_and_save()\n elif flow_id == 3:\n etl.normalize_dataset()\n elif flow_id == 4:\n classifiers.run_models_comparison()",
"def get_or_create(self, model, **kwargs):\n self.logger.debug(\"get or create on {}\".format(model.__tablename__))\n item = self.db.session.query(model).filter_by(**kwargs).first()\n if item:\n self.logger.debug(\"item exists: {}\".format(item))\n return item\n else:\n self.logger.debug(\"item does not exist, creating it now\")\n item = model(**kwargs)\n self.logger.debug(\"created {}, adding item to db now\".format(item))\n self.db.session.add(item)\n return item",
"def createNewModel(self, modelName):\n try:\n storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))\n\n folderSufix = 1\n new_model_name = modelName\n while storage.exists(join(storage.base_location, new_model_name)):\n folderSufix += 1\n new_model_name = f'{modelName}_{folderSufix}'\n\n folder_path = join(storage.base_location, new_model_name)\n model_file = join(folder_path, f'{new_model_name}.ppl')\n\n if not storage.exists(folder_path):\n os.mkdir(folder_path)\n\n calcEngine = CalcEngine.factory(self.client_session)\n if calcEngine.createNewModel(model_file, new_model_name):\n self.closeModel()\n return self.openModel(join(storage.base_location, new_model_name, f'{new_model_name}.ppl'))\n except Exception as ex:\n raise ex",
"def _ensure_side_model_exists(self):\n # TODO used metaclass for more pythonic\n self.user.create_if_not_exists()\n # Project model need User object exists\n self.project.create_if_not_exists(self._user)",
"def add_flow(self, flow: FlowRoot):\n with self._lock:\n self.flow_roots[flow.name] = flow",
"def push():\n model_id = flask.request.args.get('id')\n model_grand_list = app.config['store_cache'].read()\n found = False\n if model_grand_list is not None:\n for store in model_grand_list:\n for model in model_grand_list[store]['model_list']:\n if model['id'] == model_id:\n url = model_grand_list[store]['base_url']\n directory = model['dir_name']\n found = True\n break\n if found:\n break\n if not found:\n return 'Error', 404\n else:\n weights, model, label, meta_data = retrieve_files(url, directory)\n job = PretrainedModelJob(\n weights,\n model,\n label,\n meta_data['framework'],\n username=auth.get_username(),\n name=meta_data['name']\n )\n scheduler.add_job(job)\n return flask.redirect(flask.url_for('digits.views.home', tab=3)), 302",
"def test_model_flow_node_model_flow_id_node_id_component_put(self):\n pass",
"def hasModel(self, model):\n if model in self.models:\n return S_OK()\n else:\n return S_ERROR(\"Model %s is not defined, use any of %s\" % (model, self.models.keys()))",
"def test_workflows_id_exists_get(self):\n pass",
"def check_model_exists(class_name):\n if path.exists(settings.get('FALAFEL_DIR') + settings.get('MODELS_DIR') + '/' + class_name + '.py'):\n return True\n else:\n return False",
"def _create_entity(self, model_name, entity):\n model_pool = self.pool.get(model_name)\n prepared_entity = self._prepare_entity(model_name, entity)\n if not prepared_entity:\n logger.debug(\"Prepared entity is empty : %s model %s\" % (prepared_entity, model_name))\n return False\n\n logger.debug(\"Creating entity %s\\n%s\" % (model_name, prepared_entity))\n new_id = model_pool.create(self.cr, self.uid, prepared_entity)\n\n logger.debug(\"Created %s, id %s\" % (model_name, new_id))\n return new_id",
"def post(self, request, *args, **kwargs):\n self.create_flow_file_db_entry()\n self.handle_chunk(request)\n return self.return_response(self.flow_file.identifier)",
"def add_flow(self, flow: \"Flow\") -> str:\n self.flows = {flow.name: flow.name} # type: Dict[str, str]\n self._flows = {flow.name: flow} # type: Dict[str, Flow]\n return flow.name",
"def getFile(self, model):\n res = self.hasModel(model)\n if not res['OK']:\n return res\n if not self.models[model]:\n return S_ERROR(\"No file attached to model %s\" % model)\n return S_OK(self.models[model])",
"def create_flow_event(self, source, sink, instance):\n try: # Check the cache\n flow = self.flow_lookup[(source, sink)]\n if flow:\n flow.add_event(instance, source, sink)\n except KeyError:\n for flow in self.flows:\n if flow.add_event(instance, source, sink): break\n else:\n flow = None\n # Cache the result\n self.flow_lookup[(source, sink)] = flow",
"def check_model_exists(ckpt):\n expected_data = ckpt + \".data-00000-of-00001\"\n return os.path.exists(expected_data)",
"def WriteFlowObject(self, flow_obj, allow_update=True):\n if flow_obj.client_id not in self.metadatas:\n raise db.UnknownClientError(flow_obj.client_id)\n\n key = (flow_obj.client_id, flow_obj.flow_id)\n\n if not allow_update and key in self.flows:\n raise db.FlowExistsError(flow_obj.client_id, flow_obj.flow_id)\n\n now = rdfvalue.RDFDatetime.Now()\n\n clone = flow_obj.Copy()\n clone.last_update_time = now\n clone.create_time = now\n\n self.flows[key] = clone",
"def post(cls, flow_name: str):\n data = file_schema.load(request.files) # {\"file\": FileStorage}\n try:\n file_path = uploads.save_file(data[\"file\"], folder=flow_name)\n basename = uploads.get_basename(file_path)\n return {\"message\": gettext(\"file_uploaded\").format(basename)}, 200\n \n except UploadNotAllowed:\n extension = uploads.get_extension(data[\"file\"])\n return {\"message\": gettext(\"file_illegal_extension\").format(extension)}, 400",
"def places_create_one():\n if request.method == 'POST':\n place = request.json['place']\n place = get_place_data(place)\n place = filter_essential_data(place)\n save_place(place)\n data = is_exists(place['local_name'])\n if data is False:\n return jsonify({'response': 'Not save.'}), 406\n return jsonify(place), 200",
"def get_or_create_oeid(self, cr, uid, external_session, external_id, context=None):\n if external_id:\n existing_id = self.get_oeid(cr, uid, external_id, external_session.referential_id.id, context=context)\n if existing_id:\n return existing_id\n external_session.logger.info(('Missing openerp resource for object %s'\n ' with external_id %s. Importing on the fly')%(self._name, external_id))\n return self._import_one_resource(cr, uid, external_session, external_id, context=context)\n return False",
"def workflow(site):\n click.echo(\"Preparing input for site {}\".format(site))\n key_store = KeyStore(get_config_file())\n if key_store.does_site_exist(site):\n flow.generate_mesh()\n flow.generate_control_files()\n flow.compress_input()\n flow.encrypt_input()\n flow.transfer_files()\n else:\n click.echo(\"Site {} does not exist in keystore, please add_site\".format(site))",
"def get_or_initialize_model(self, payload):\n try:\n model = self.get_model(payload)\n except DoesNotExistException:\n model = self.initialize_model()\n\n model.id = payload.get('local_id', model.id)\n return model",
"def save_flow_definition(self, definition):\n return FlowDefinition.deserialize(self._post('flow_definition', None, definition.serialize()))"
] | [
"0.63103426",
"0.5682208",
"0.5225242",
"0.513043",
"0.49558088",
"0.49539042",
"0.48757178",
"0.4860312",
"0.48489386",
"0.48403105",
"0.4821129",
"0.48097745",
"0.48096967",
"0.4789475",
"0.47876206",
"0.47869292",
"0.47778752",
"0.47659418",
"0.4671119",
"0.46696758",
"0.4665771",
"0.4657776",
"0.46559095",
"0.46494666",
"0.4647151",
"0.45703918",
"0.4563991",
"0.45594332",
"0.45581228",
"0.45430124"
] | 0.69185144 | 0 |
Read the data in from xyz.csv, add two new columns, one to calculate dollar flux and the other to calculate percentage flux, and return the result as a list of tuples | def calculate_flux(XYZ: str) -> list:
df = pd.read_csv(XYZ)
df['Dollar Flux'] = df['12/31/20'] - df['12/31/19']
df['Percentage Flux'] = df['12/31/20'] / df['12/31/19'] - 1
return list(tuple(df.loc[i]) for i in range(df.shape[0])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_flux(XYZ: str) -> list:\n\n\n data = pd.read_csv(XYZ,dtype={'12/31/2020': int,'12/31/2019': int})\n\n data['dollar_flux'] = data.iloc[:,1].sub(data.iloc[:,2])\n data['pct_flux'] = data.iloc[:,[-2,1]].pct_change(axis=1).dropna(axis=1)\n\n\n return list(data.to_records(index=False))",
"def read_full_data(self):\n x=[]\n y=[]\n z=[]\n with open(self.file, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n x.append(row[0])\n # Remove header from csv file, if it exists\n if x[0].split()[0] == '%':\n x.remove(row[0])\n else:\n y.append(row[1])\n z.append(row[2])\n return x,y,z",
"def readCSV(filename):\r\n data = list( csv.reader(open('HW_08_DBScan_Data_NOISY_v300.csv','r'),delimiter=','))\r\n for dIdx in range(len(data)):\r\n data[dIdx] = [float(data[dIdx][0]),float(data[dIdx][1]),float(data[dIdx][2])]\r\n #print(data[0])\r\n return data",
"def getFeats(x):\n with open('LEN+PUNCT2.csv', 'r') as fh:\n reader = csv.reader(fh)\n # skip headers\n next(reader, None)\n csv_data = []\n for row in reader:\n csv_data.append([float(var) for var in row])\n csv_data = np.asarray(csv_data)\n return csv_data",
"def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: #must be the header\n continue\n return data_points",
"def read(self, filename):\n lines = []\n rawData = []\n file = open(filename, \"rU\")\n csv_reader = csv.reader( file )\n for line in csv_reader:\n lines.append(line)\n for item in range(len(line)):\n line[item] = line[item].replace(\" \",\"\")\n self.headers = lines[0]\n self.types = lines[1]\n rawData = lines[2:]\n for row in rawData:\n newRow = []\n for i in range(len(row)):\n if self.types[i] != 'numeric':\n continue\n else:\n newRow.append(float((row[i].strip())))\n self.finalData.append(newRow)\n self.data = np.matrix(self.finalData)\n\n for i in range(len(self.types)):\n if self.types[i] == 'numeric':\n self.numHeadList.append(self.headers[i])\n i = 0\n for header in self.numHeadList:\n self.header2col[header] = i\n i += 1\n\n return self.data",
"def post_process_data(input_file):\n data_list, header_list = Parser.__parse_csv_data(input_file)\n json_data = Parser.__read_column_index()\n Y = [json_data['output'][data[1]]['value'] for data in data_list]\n data_list = [d[3:] for d in data_list]\n X = []\n\n for i in range(len(data_list)):\n x = numpy.zeros(len(json_data['input']))\n x[json_data['input']['pre-tax amount']['column_index']] = data_list[i][3]\n x[json_data['input']['tax amount']['column_index']] = data_list[i][3]\n\n for j in range(len(data_list[i])):\n try:\n float(data_list[i][j])\n except ValueError:\n try:\n x[json_data['input'][data_list[i][j]]['column_index']] = 1\n except KeyError:\n pass\n X.append(x)\n return X, Y",
"def read_data(filename, delimiter=\",\", has_header=True):\n data = []\n header = []\n with open(filename) as f:\n reader = csv.reader(f, delimiter=delimiter)\n if has_header:\n header = next(reader, None)\n for line in reader:\n example = [float(x) for x in line]\n data.append(example)\n\n return header, data",
"def read_csv():",
"def main():\n data_file = open('lecture15f.csv', 'r')\n sum_of_values = 0.0\n count = 0\n\n data_file.readline()\n for line in data_file:\n print ('line of file =', line)\n # 'float,float,float,float'\n for data_point in line.split(','):\n # ['float', 'float', 'float',]\n print ('data point in line =', data_point)\n sum_of_values = sum_of_values + float(data_point)\n count += 1\n\n print(\"The average of {0} values is {1}\".format(\n count,\n sum_of_values / count))",
"def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric",
"def load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)",
"def load_data_from_csv(f_name):\n data = []\n f = open(f_name, \"r\")\n reader = csv.reader(f,delimiter=\",\")\n for row in reader:\n data.append([float(i) for i in row])\n f.close()\n data = np.array(data)\n x = data[0,:]\n data = data[1:,:].swapaxes(0,1)\n return x, data",
"def get_weather_data(filename, dates, highs, lows, date_index, high_index,\n low_index):\n with open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n # Get data temp.\n for row in reader:\n current_date = datetime.strptime(row[date_index], '%Y-%m-%d')\n try:\n high = int(row[high_index])\n low = int(row[low_index])\n except ValueError:\n print(f\"No data for {current_date}\")\n else:\n dates.append(current_date)\n highs.append(high)\n lows.append(low)",
"def read_fermi_3fhl():\n with open('fermi_3fhl.csv') as fh:\n lines = fh.readlines()\n colnames = lines[0].strip().split(',')\n data = []\n for line in lines[1:]:\n parts = line.strip().split(',')\n row = dict(zip(colnames, parts))\n for name in ['Flux', 'GLON', 'GLAT', 'Signif_Avg']:\n row[name] = float(row[name])\n data.append(row)\n return data",
"def read(self):\n\n with open(self.path, 'r') as to_read:\n reader = csv.reader(to_read)\n _ = reader.next()\n camera_vals = []\n velocities = []\n for row in reader:\n camera_vals.append(row[:len(camera_headers)])\n velocities.append(row[len(camera_headers):][0])\n velocities = [[int(x) for x in string] for string in velocities]\n camera_vals = camera_vals[::downsample_constant]\n velocities = velocities[::downsample_constant]\n return (\n [[float(x) for x in row] for row in camera_vals],\n velocities\n )",
"def import_data(address):\n try:\n inputcsv = csv.reader(open(address, \"r\"), delimiter=\";\", lineterminator=\"\\n\")\n except IOError:\n print \"File not exists or is unreadable, please check it.\"\n exit(1)\n\n data = list() # all data\n item = list() # each tabular\n count = 0\n subcount = 0\n try:\n for row in inputcsv:\n if count < 2 : # read Time period and number of product\n data.append(int(row[1]))\n else :\n item.append(row[1:])\n subcount +=1 \n if subcount == data[1]:\n data.append(np.array(item, dtype=float))\n item = list()\n subcount = 0\n count += 1\n if (data[1] > 1):\n data.append(np.array(item, dtype=float)) # manage the last tabular\n except:\n print \"File is not well formated, please correct it.\"\n exit(1)\n return data",
"def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)",
"def load_data_from_file(filename):\r\n time = []\r\n position = []\r\n with open(filename, 'r') as original:\r\n time_position = list(csv.reader(original)) # list()\r\n for row in range(1, len(time_position)):\r\n time.append(float(time_position[row][0]))\r\n position.append(float(time_position[row][1]))\r\n\r\n return time, position",
"def get_data(filename):\n\n # Read csv file as panda dataframe\n data = pd.read_csv(filename)\n data.columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'MarketCap']\n\n # Adjast frames\n data['Date'] = pd.to_datetime(data['Date'])\n data['Average'] = data.eval('Open + Close') / 2\n \n # Adjust diff column\n data['Diff'] = data['Average'] - data['Average'].shift(1)\n \n # Return data as lists\n return data['Date'].tolist()[1:], data['Average'].tolist()[1:], data['Diff'].tolist()[1:]",
"def readData(file):\n \n inputValues=list()\n outputValue=list()\n totalData=list()\n \n with open(file) as fp :\n for line in fp:\n if line.strip( ) == '':\n continue\n attributeValue = line.strip().split(\",\")\n inputValue1 = float(attributeValue[0])\n inputValue2 = float(attributeValue[1])\n \n inputValues+=[[inputValue1]+[inputValue2]]\n outputValue+=[int(attributeValue[2])]\n totalData+=[[inputValue1]+[inputValue2]+[int(attributeValue[2])]]\n \n \n return inputValues,outputValue,totalData",
"def load_csv(stock_name):\n filename = file_prefix + stockname_to_filename(stock_name) + \".csv\"\n dates = []\n prices = []\n with open(filename, \"r\") as file:\n file.__next__()\n for line in file:\n date, price = line.strip().split(csv_delimiter)\n dates.append(date)\n prices.append(float(price))\n return dates, prices",
"def read_csv():\n points = []\n with open(sys.argv[1], \"rU\") as f:\n reader = csv.reader(f)\n for row in reader:\n if len(row) > 3:\n print(\"Points in CSV file are greater than 3 dimensions\")\n sys.exit(0)\n # If set of points is 2 dimensional, autogenerate the 3rd dimension\n elif len(row) == 2:\n row.append(['0'])\n points.append(tuple(map(float, row)))\n return points",
"def readData():\n fileName = sys.argv[1]\n inputArray = []\n with open(fileName) as csvFile:\n reader = csv.reader(csvFile)\n arraySlice = []\n for row in reader:\n arraySlice = (row[235:587])\n if arraySlice[0] != \"\":\n arraySlice = [float(i) for i in arraySlice]\n inputArray.append(arraySlice)\n csvFile.close()\n return inputArray",
"def load_data(self, filename):\r\n #sqlcontext = SQLContext(self.sc)\r\n #df = sqlcontext.read.format('com.databricks.spark.csv').options(header='false', inferschema='true').load(filename)\r\n #df = sc.textFile(r\"C:\\Users\\mohan\\Downloads\\patches.csv\").map(lambda line: line.split(\",\"))\r\n #print (df.count())\r\n df = self.sc.textFile(filename).map(lambda line: line.split(\",\"))\r\n l = df.map(lambda w: [int(float(c)) for c in w]).zipWithIndex()\r\n return l\r\n raise NotImplementedError",
"def read_file(filename):\n reader = csv.reader(open(filename))\n names, distances = [], []\n for row in reader:\n names.append(row[0].strip())\n distances.append(tuple(int(value) for value in row[1:]))\n return names, distances",
"def read(self, args):\n\t\twith open(self.filename, 'rb') as csvfile:\n\t\t\tfilereader = csv.reader(csvfile)\n\t\t\tfor row in filereader:\t\t\t#reads the csv line by line\n\t\t\t\tfor num in row:\t\t\t\t#reads each entry in the csv\n\t\t\t\t\tif num != 'NA' and not num.startswith('V'): \t#cuts out the crap we don't care about\n\t\t\t\t\t\tself.all_likes.add((row[0],num))\t\t\t#adds a tuple to the set 'all_likes' with (<IDnum>, <likedIDnum>)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue",
"def read_1D_comsol_data(self):\n x=[]\n y=[]\n with open(self.file, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n x.append(row[0])\n y.append(row[1])\n x = np.asarray((x),dtype=float)\n y = np.asarray((y),dtype=float)\n return x,y",
"def __load_csv(filename):\n fp = open(Parser.DATA_FOLDER_PATH + filename + '.csv', 'r')\n records = []\n for line in fp:\n items = line.strip().split(',')\n x, y, z = '0', '0', '0'\n if len(items) > 1:\n x = items[1]\n if len(items) > 2:\n y = items[2]\n if len(items) > 3:\n z = items[3]\n\n values = [x, y, z]\n records.append(values)\n\n # Discard some beginning data which may be noisy\n # del records[:int(len(records) / 30)]\n n = len(records)\n\n for i in range(n):\n rec = []\n # Consider X, Y, Z axes\n for k in range(3):\n # If can convert string to float\n try:\n val = float(records[i][k])\n except ValueError:\n val = 0\n rec.append(val)\n\n # Replace it\n records[i] = rec\n return records",
"def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col"
] | [
"0.685175",
"0.6411039",
"0.62081707",
"0.6109357",
"0.60995525",
"0.5916653",
"0.5815268",
"0.58103406",
"0.5806701",
"0.5740641",
"0.57309264",
"0.571921",
"0.570458",
"0.5695528",
"0.5673751",
"0.56716675",
"0.5653346",
"0.5641983",
"0.5631174",
"0.5611434",
"0.56070626",
"0.55897486",
"0.5571428",
"0.5563856",
"0.5530979",
"0.55294985",
"0.5521956",
"0.55019814",
"0.54849255",
"0.548416"
] | 0.70332384 | 0 |
Run gdal_merge using an external process. | def run_merge(*src, argv=None):
tmpdir = tempfile.mkdtemp()
inputs = []
for i, drv in enumerate(src):
if type(drv) != str:
tmppath = os.path.join(tmpdir, "input_%s.tif" % i)
drv.write(tmppath)
inputs.append(tmppath)
else:
            inputs.append(drv)
if "-o" in argv:
outpath = argv[argv.index("-o") + 1]
else:
outpath = os.path.join(tempfile.gettempdir(), "%s.tif" % next(tempfile._get_candidate_names()))
logger.debug("Writing to file %s" % outpath)
argv += ["-o", outpath]
argv = gdal.GeneralCmdLineProcessor(argv)
options = argv + inputs
assert run_external_app("gdal_merge.py", options) == 0, "Error running gdal_merge"
remove_directory(tmpdir)
return gdal.Open(outpath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call_gdal_util(util_name,\n gdal_path=None,\n src_files='',\n src_band=None,\n dst_file=None,\n options={}):\n # define specific options\n _opt_2b_in_quote = [\"-mo\", \"-co\"]\n\n # get the gdal installed path if it is set in system environmental variable\n if not gdal_path:\n gdal_path = _find_gdal_path()\n\n # prepare the command string\n cmd = []\n gdal_cmd = os.path.join(gdal_path, util_name) if gdal_path else util_name\n # put gdal_cmd in double quotation\n cmd.append('\"%s\"' % gdal_cmd)\n\n for k, v in iter(options.items()):\n if k in _opt_2b_in_quote:\n if (k == \"-mo\" or k == \"-co\") and isinstance(v, (tuple, list)):\n for i in range(len(v)):\n cmd.append(\" \".join((k, '\"%s\"' % v[i])))\n else:\n cmd.append(\" \".join((k, '\"%s\"' % v)))\n else:\n if v is not None:\n cmd.append(k)\n # if hasattr(v, \"__iter__\"):\n # cmd.append(' '.join(map(str, v)))\n # else:\n # cmd.append(str(v))\n cmd.append(str(v))\n\n # add source files and destination file (in double quotation)\n dst_file_ori = None\n # switch for multiple source files (e.g. for gdal_merge.py)\n if isinstance(src_files, list):\n src_files_str = \" \".join(src_files)\n # switch for single source files\n else:\n src_files_str = '\"%s\"' % src_files\n # NETCDF input case\n if src_files.endswith('.nc'):\n src_files = 'NETCDF:{}:{}'.format(src_files, src_band)\n # create an interim existing file\n if src_files == dst_file:\n fileparts = os.path.splitext(dst_file)\n dst_file_tmp = fileparts[0] + '_temp' + fileparts[1]\n dst_file_ori = dst_file\n dst_file = dst_file_tmp\n\n # build the final call\n cmd.append(src_files_str)\n cmd.append('\"%s\"' % dst_file)\n\n # create the directory if not exists\n if dst_file is not None:\n if not os.path.exists(os.path.dirname(dst_file)):\n os.makedirs(os.path.dirname(dst_file))\n\n # check for success\n output = subprocess.check_output(\" \".join(cmd), shell=True, cwd=gdal_path)\n succeed = _analyse_gdal_output(output)\n\n # restore old filename\n if succeed and dst_file_ori is not None:\n os.remove(dst_file_ori)\n os.rename(dst_file, dst_file_ori)\n\n return succeed, output",
"def hxlmerge():\n run_script(hxlmerge_main)",
"def merge_rasters(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n rasters = [str(x) for x in i.joinpath('subnational').iterdir() if not x.name.endswith('txt') if x.name.endswith('norm.tif')]\n outfile = i.joinpath(f'{self.country}_{month}_normalised.tif')\n tiffs = \" \".join(rasters)\n gdal_cmd = f\"gdal_merge.py -o {outfile} -a_nodata -99999.0 -of gtiff {tiffs}\"\n subprocess.call(gdal_cmd, shell=True)",
"def merge_clouds(commande):\n commande+=\" -merge_clouds -save_clouds\"\n subprocess.call(commande)\n return True",
"def merge(mergeFiles,mergeDb,createDB,dbase,v,dfile):\n\tglobal verbose\n\n\tverbose = v\n\tif len(mergeFiles) > 0:\n\t\tfor f in mergeFiles:\n\t\t\tprint \"Merge => \"+ f\n\t\t\ttry:\n\t\t\t\tfl = open(f,'r')\n\t\t\t\tProcessEntryFile(fl)\n\t\t\t\tfl.close()\n\t\t\t\tif verbose >= 1:\n\t\t\t\t\tprint reference\n\t\t\texcept IOError:\n\t\t\t\tprint 'File '+f +' cannot be open'\n\n\tif len(mergeDb) > 0:\n\t\tfor f in mergeDb:\n\t\t\tprint \"Merge => \"+ f\n\t\t\tProcessEntryBase(f)\n\t\t\tif verbose >= 1:\n\t\t\t\tprint reference\n\t\n\tif dfile != '':\n\t\ttry:\n\t\t\tif os.path.exists(dfile):\n\t\t\t\tos.remove(dfile)\n\t\t\tfref = open(dfile,'w')\n\t\t\tput_in_file('',fref,reference)\n\t\t\tfref.close()\n\t\t\tif os.path.exists(afile):\n\t\t\t\tos.remove(afile)\n\t\t\tfref = open(afile,'w')\n\t\t\tput_in_afile('',fref,reference)\n\t\t\tfref.close()\n\t\texcept IOError:\n\t\t\tprint 'Cannot open '+dfile+' file'\n\n\tif dbase != '':\n\t\tput_in_db(dbase,reference,createDB)",
"def merge(): #Status: WIP\r\n pass",
"def execute(self, parameters, messages):\r\n arcpy.AddMessage(\"\\nPerforming overall merge...\")\r\n logging.info(\"Starting mergeAreas.py script...\\n\")\r\n # Define variables from parameters\r\n overlapWorkspace = parameters[0].valueAsText\r\n gdbWorkspace = parameters[1].valueAsText\r\n featWorkspace = parameters[2].valueAsText\r\n\r\n # Determine list of total overlap, no overlap and to merge feature classes in overlap feature dataset workspace to process.\r\n arcpy.env.workspace = overlapWorkspace\r\n mergeList = arcpy.ListFeatureClasses(\"*_toMerge\")\r\n totalOverlapList = arcpy.ListFeatureClasses(\"*_TotalOverlap\")\r\n noOverlapList = arcpy.ListFeatureClasses(\"*_noOverlap\")\r\n if len(mergeList) > 0:\r\n arcpy.AddMessage(\"Workspace contains the following \" + str(len(mergeList)) + \" feature classes to merge: \" + str(mergeList))\r\n\r\n # Organize toMerge feature classes by date\r\n mergeDictbyDate = {}\r\n for fc in mergeList:\r\n fcPath = os.path.join(overlapWorkspace, fc)\r\n fcDate = fc.split(\"_\")[1]\r\n mergeDictbyDate[fcDate] = [fcPath]\r\n\r\n # Append no overlap feature classes toMerge feature classes by date\r\n for noOverlapFc in noOverlapList:\r\n noOverlapPath = os.path.join(overlapWorkspace, noOverlapFc)\r\n noOverlapDate = noOverlapFc.split(\"_\")[1]\r\n mergeDictbyDate[noOverlapDate].append(noOverlapPath)\r\n\r\n # Organize dark targets feature classes by date\r\n arcpy.env.workspace = featWorkspace\r\n fcList = arcpy.ListFeatureClasses()\r\n fcDictByDate = {}\r\n for fc in fcList:\r\n fcPath = os.path.join(featWorkspace, fc)\r\n fcSplit = fc.split(\"_\")\r\n if fcSplit[1] in fcDictByDate:\r\n fcDictByDate[fcSplit[1]].append(fcPath)\r\n else:\r\n fcDictByDate[fcSplit[1]] = [fcPath]\r\n\r\n # Iterate through dark targets acquisition dates and check for acquisition dates with more than a single feature class (for merging)\r\n for key in fcDictByDate:\r\n if len(fcDictByDate[key]) > 1:\r\n\r\n # Iterate through feature classes within acquisition date\r\n for fc in fcDictByDate[key]:\r\n fcSplit = fc.split(\"_\")\r\n\r\n # Check for and add acquisition date toMerge feature classes if not already present\r\n if fcSplit[len(fcSplit)-2] not in mergeDictbyDate:\r\n mergeDictbyDate[fcSplit[len(fcSplit)-2]] = [fc]\r\n\r\n # Check for and add feature class toMerge feature classes if not already present within acquisition date\r\n else:\r\n fcValue = fc.split(\"\\\\\")[len(fc.split(\"\\\\\"))-1] + \"_noOverlap\"\r\n fcValuePath = os.path.join(overlapWorkspace, fcValue)\r\n if fcValuePath not in mergeDictbyDate[key]:\r\n mergeDictbyDate[key].append(fc)\r\n\r\n # Iterate through dark targets acquisition dates to compile lists of feature classes to merge\r\n for key in mergeDictbyDate:\r\n arcpy.AddMessage(\"\\nMerging feature classes in \" + key + \"...\")\r\n logging.info(\"Processing merges for acquisition date '%s'\", key)\r\n mergeList = []\r\n\r\n # Iterate through feature classes within acquisition date and append them to merge list\r\n for item in mergeDictbyDate[key]:\r\n mergeList.append(item)\r\n\r\n # Merge feature classes in merge list into single feature class for the acquisition date\r\n outputDissolveString = \"RS2_\" + key + \"_toDissolve\"\r\n outputDissolve = os.path.join(overlapWorkspace, outputDissolveString)\r\n arcpy.Merge_management(mergeList, outputDissolve)\r\n logging.info(\"Merge: '%s' created from merging the following feature classes: '%s'\", outputDissolve, str(mergeList))\r\n\r\n # Dissolve attribute duplicates and rename fields\r\n 
arcpy.AddMessage(\"Dissolving...\")\r\n dissolveLyr = \"dissolveLyr\"\r\n outputMergeString = \"RS2_\" + key + \"_merged\"\r\n outputMerge = os.path.join(gdbWorkspace, outputMergeString)\r\n dissolveFields = [\"Pid\", \"RsatID\"]\r\n fieldList = arcpy.ListFields(outputDissolve)\r\n statsFields = []\r\n for field in fieldList:\r\n if \"OBJECTID\" in field.name or \"FID\" in field.name or \"Shape\" in field.name or field.name in dissolveFields or field.name == \"ID\":\r\n continue\r\n statsField = [field.name,\"FIRST\"]\r\n statsFields.append(statsField)\r\n arcpy.MakeFeatureLayer_management(outputDissolve, dissolveLyr)\r\n logging.info(\"Make Feature Layer: '%s' layer created from '%s' feature class\", dissolveLyr, outputDissolve)\r\n arcpy.Dissolve_management(dissolveLyr, outputMerge, dissolveFields, statsFields)\r\n logging.info(\"Dissolve: '%s' feature class created from '%s' layer dissolve\", outputMerge, dissolveLyr)\r\n fieldList = arcpy.ListFields(outputMerge)\r\n for field in fieldList:\r\n if field.name.startswith(\"FIRST_\"):\r\n newName = field.name[6:]\r\n arcpy.AlterField_management(outputMerge, field.name, newName)\r\n\r\n # Update targetID with combined target ID for overlapping features\r\n arcpy.AddMessage(\"Updating targetID...\")\r\n finalOutputString = \"RS2_\" + key\r\n overlapBool = False\r\n\r\n # Iterate through total overlap feature classes\r\n for fc in totalOverlapList:\r\n\r\n # Check for merged acquisition date feature class containing overlapping features (by finding equivalent total overlap feature class)\r\n if finalOutputString == fc.strip(\"_TotalOverlap\"):\r\n overlapBool = True\r\n\r\n # Perform spatial join to access targetID field from total overlap feature class\r\n totalOverlapFc = os.path.join(overlapWorkspace, fc)\r\n finalOutput = os.path.join(gdbWorkspace, finalOutputString)\r\n fieldmappings = arcpy.FieldMappings()\r\n fieldmappings.addTable(outputMerge)\r\n fldmap_TARGETID = arcpy.FieldMap()\r\n fldmap_TARGETID.addInputField(totalOverlapFc, \"targetID\")\r\n fld_TARGETID = fldmap_TARGETID.outputField\r\n fld_TARGETID.name = \"targetID_1\"\r\n fldmap_TARGETID.outputField = fld_TARGETID\r\n fieldmappings.addFieldMap(fldmap_TARGETID)\r\n arcpy.SpatialJoin_analysis(outputMerge, totalOverlapFc, finalOutput, \"#\", \"#\", fieldmappings)\r\n logging.info(\"Spatial Join: '%s' feature class created by joining '%s' with '%s'\", finalOutput, outputMerge, totalOverlapFc)\r\n\r\n # Update targetID with combined targetID determined from total overlap feature class\r\n expression = \"copyTargetID(!targetID!, !targetID_1!)\"\r\n codeblock = \"\"\"def copyTargetID(targetID, comb_targetID):\r\n if comb_targetID is None:\r\n return targetID\r\n else:\r\n return comb_targetID\"\"\"\r\n arcpy.CalculateField_management(finalOutput, \"targetID\", expression, \"PYTHON_9.3\", codeblock)\r\n logging.info(\"Calculate Field: 'targetID' field value calculated for '%s' feature class\", finalOutput)\r\n\r\n # Delete extraneous fields\r\n arcpy.DeleteField_management(finalOutput, \"targetID_1\")\r\n arcpy.DeleteField_management(finalOutput, \"Join_Count\")\r\n arcpy.DeleteField_management(finalOutput, \"TARGET_FID\")\r\n\r\n # Rename merged acquisition date feature class to appropriate name if it does not contain overlapping targets\r\n if overlapBool is False:\r\n arcpy.Rename_management(outputMerge, finalOutputString)\r\n logging.info(\"Rename: '%s' feature class renamed to '%s'\", outputMerge, finalOutputString)\r\n\r\n # Delete unneeded process outputs (dissolve and merge 
outputs)\r\n arcpy.Delete_management(outputDissolve)\r\n logging.info(\"Delete: '%s' feature class deleted\", outputDissolve)\r\n if arcpy.Exists(outputMerge):\r\n arcpy.Delete_management(outputMerge)\r\n logging.info(\"Delete: '%s' feature class deleted\", outputMerge)\r\n\r\n logging.info(\"Processing for merges for acquisition date '%s' complete\\n\", key)\r\n\r\n # Iterate through dark targets acquisition dates to export single feature classes\r\n arcpy.AddMessage(\"\\nExporting single feature classes...\")\r\n logging.info(\"Processing single feature classes to export\")\r\n for key in fcDictByDate:\r\n if len(fcDictByDate[key]) == 1:\r\n for fc in fcList:\r\n fcSplit = fc.split(\"_\")\r\n if fcSplit[1] in mergeDictbyDate:\r\n continue\r\n else:\r\n outputFeatureName = \"RS2_\" + fcSplit[1]\r\n arcpy.FeatureClassToFeatureClass_conversion(fc, gdbWorkspace, outputFeatureName, \"#\", \"#\", )\r\n logging.info(\"Feature Class to Feature Class: '%s' feature class converted to '%s'\", fc, outputFeatureName)\r\n outputFeatPath = os.path.join(gdbWorkspace, outputFeatureName)\r\n arcpy.DeleteField_management(outputFeatPath, \"FID\")\r\n logging.info(\"Processing of single feature classes to export complete\")\r\n\r\n logging.info(\"mergeAreas.py script finished\\n\\n\")\r\n\r\n return",
"def execute(args):\n\n #append a _ to the suffix if it's not empty and doens't already have one\n try:\n file_suffix = args['results_suffix']\n if file_suffix != \"\" and not file_suffix.startswith('_'):\n file_suffix = '_' + file_suffix\n except KeyError:\n file_suffix = ''\n\n filesystemencoding = sys.getfilesystemencoding()\n\n timber_shape = ogr.Open(\n args['timber_shape_uri'].encode(filesystemencoding), 1)\n\n #Add the Output directory onto the given workspace\n workspace_dir = args['workspace_dir'] + os.sep + 'output/'\n if not os.path.isdir(workspace_dir):\n os.makedirs(workspace_dir)\n\n #CopyDataSource expects a python string, yet some versions of json load a\n #'unicode' object from the dumped command line arguments. The cast to a\n #python string here should ensure we are able to proceed.\n shape_source = str(workspace_dir + 'timber%s.shp' % file_suffix)\n\n #If there is already an existing shapefile with the same name\n #and path, delete it\n if os.path.isfile(shape_source):\n os.remove(shape_source)\n\n #Copy the input shapefile into the designated output folder\n driver = ogr.GetDriverByName('ESRI Shapefile')\n copy = driver.CopyDataSource(timber_shape, shape_source)\n\n #OGR closes datasources this way to make sure data gets flushed properly\n timber_shape.Destroy()\n copy.Destroy()\n\n timber_output_shape = ogr.Open(shape_source.encode(filesystemencoding), 1)\n\n layer = timber_output_shape.GetLayerByName('timber%s' % file_suffix)\n #Set constant variables from arguments\n mdr = args['market_disc_rate']\n attr_table = dbf.Dbf(args['attr_table_uri'], readOnly=True)\n #Set constant variables for calculations\n mdr_perc = 1 + (mdr / 100.00)\n sumtwo_lower_limit = 0\n\n #Create three new fields on the shapefile's polygon layer\n for fieldname in ('TNPV', 'TBiomass', 'TVolume'):\n field_def = ogr.FieldDefn(fieldname, ogr.OFTReal)\n layer.CreateField(field_def)\n\n #Build a lookup table mapping the Parcel_IDs and corresponding row index\n parcel_id_lookup = {}\n for i in range(attr_table.recordCount):\n parcel_id_lookup[attr_table[i]['Parcel_ID']] = attr_table[i]\n\n #Loop through each feature (polygon) in the shapefile layer\n for feat in layer:\n #Get the correct polygon attributes to be calculated by matching the\n #feature's polygon Parcl_ID with the attribute tables polygon Parcel_ID\n parcl_index = feat.GetFieldIndex('Parcl_ID')\n parcl_id = feat.GetField(parcl_index)\n attr_row = parcel_id_lookup[parcl_id]\n #Set polygon attribute values from row\n freq_harv = attr_row['Freq_harv']\n num_years = float(attr_row['T'])\n harv_mass = attr_row['Harv_mass']\n harv_cost = attr_row['Harv_cost']\n price = attr_row['Price']\n maint_cost = attr_row['Maint_cost']\n bcef = attr_row['BCEF']\n parcl_area = attr_row['Parcl_area']\n perc_harv = attr_row['Perc_harv']\n immed_harv = attr_row['Immed_harv']\n\n sumtwo_upper_limit = int(num_years - 1)\n #Variable used in npv summation one equation as a distinguisher\n #between two immed_harv possibilities\n subtractor = 0.0\n yr_per_freq = num_years / freq_harv\n\n #Calculate the harvest value for parcel x\n harvest_value = (perc_harv / 100.00) * ((price * harv_mass) - harv_cost)\n\n #Initiate the biomass variable. 
Depending on 'immed_Harv' biomass\n #calculation will differ\n biomass = None\n\n #Check to see if immediate harvest will occur and act accordingly\n if immed_harv.upper() == 'N' or immed_harv.upper() == 'NO':\n sumone_upper_limit = int(math.floor(yr_per_freq))\n sumone_lower_limit = 1\n subtractor = 1.0\n summation_one = npv_summation_one(\n sumone_lower_limit, sumone_upper_limit, harvest_value,\n mdr_perc, freq_harv, subtractor)\n summation_two = npv_summation_two(\n sumtwo_lower_limit, sumtwo_upper_limit, maint_cost, mdr_perc)\n #Calculate Biomass\n biomass = \\\n parcl_area * (perc_harv / 100.00) * harv_mass \\\n * math.floor(yr_per_freq)\n elif immed_harv.upper() == 'Y' or immed_harv.upper() == 'YES':\n sumone_upper_limit = int((math.ceil(yr_per_freq) - 1.0))\n sumone_lower_limit = 0\n summation_one = npv_summation_one(\n sumone_lower_limit, sumone_upper_limit, harvest_value,\n mdr_perc, freq_harv, subtractor)\n summation_two = npv_summation_two(\n sumtwo_lower_limit, sumtwo_upper_limit, maint_cost, mdr_perc)\n #Calculate Biomass\n biomass = (\n parcl_area * (perc_harv / 100.00) * harv_mass *\n math.ceil(yr_per_freq))\n\n #Calculate Volume\n volume = biomass * (1.0 / bcef)\n\n net_present_value = (summation_one - summation_two)\n total_npv = net_present_value * parcl_area\n\n #For each new field set the corresponding value to the specific polygon\n for field, value in (\n ('TNPV', total_npv), ('TBiomass', biomass),\n ('TVolume', volume)):\n index = feat.GetFieldIndex(field)\n feat.SetField(index, value)\n\n #save the field modifications to the layer.\n layer.SetFeature(feat)\n feat.Destroy()\n\n #OGR closes datasources this way to make sure data gets flushed properly\n timber_output_shape.Destroy()\n\n #Close the polygon attribute table DBF file and wipe datasources\n attr_table.close()\n copy = None\n timber_shape = None\n timber_output_shape = None",
"def run(self):\n if self.is_complete:\n LOG.debug(\"Skipping Geopackage, file exists\")\n return\n keys_points = self.feature_selection.key_union('points')\n keys_lines = self.feature_selection.key_union('lines')\n keys_polygons = self.feature_selection.key_union('polygons')\n osmconf = OSMConfig(self.stage_dir,points=keys_points,lines=keys_lines,polygons=keys_polygons)\n conf = osmconf.create_osm_conf()\n ogr_cmd = self.ogr_cmd.safe_substitute({'gpkg': self.output_gpkg,\n 'osm': self.input_pbf, 'osmconf': conf})\n LOG.debug('Running: %s' % ogr_cmd)\n subprocess.check_call(ogr_cmd, shell=True, executable='/bin/bash')\n\n \"\"\"\n Create the default osm gpkg schema\n \"\"\"\n conn = sqlite3.connect(self.output_gpkg)\n conn.enable_load_extension(True)\n cur = conn.cursor()\n cur.execute(\"select load_extension('mod_spatialite')\")\n cur.execute(\"CREATE TABLE boundary (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, geom GEOMETRY)\");\n cur.execute(\"INSERT INTO boundary (geom) VALUES (GeomFromWKB(?,4326));\",(self.aoi_geom.wkb,))\n cur.executescript(SPATIAL_SQL)\n self.update_zindexes(cur,self.feature_selection)\n\n # add themes\n create_sqls, index_sqls = self.feature_selection.sqls\n for query in create_sqls:\n cur.executescript(query)\n for query in index_sqls:\n cur.executescript(query)\n conn.commit()\n conn.close()\n\n if self.per_theme:\n # this creates per-theme GPKGs\n for theme in self.feature_selection.themes:\n conn = sqlite3.connect(self.stage_dir + slugify(theme) + \".gpkg\")\n conn.enable_load_extension(True)\n cur = conn.cursor()\n cur.execute(\"attach database ? as 'geopackage'\",(self.output_gpkg,))\n cur.execute(\"create table gpkg_spatial_ref_sys as select * from geopackage.gpkg_spatial_ref_sys\")\n cur.execute(\"create table gpkg_contents as select * from geopackage.gpkg_contents where 0\")\n cur.execute(\"create table gpkg_geometry_columns as select * from geopackage.gpkg_geometry_columns where 0\")\n for geom_type in self.feature_selection.geom_types(theme):\n for stmt in self.feature_selection.create_sql(theme,geom_type):\n cur.executescript(stmt)\n conn.commit()\n conn.close()",
"def merge():\n click.echo(\"Not implemented yet. In the future, this command will be used for merging models.\")\n sys.exit(-2)",
"def convert(threshold, infile, tmpfile_1, tmpfile_2, outfile):\n args = [\n \"gdal_calc.py\",\n '-A', infile,\n '--outfile={}'.format(tmpfile_1),\n '--calc=logical_and(A>={}, A<999)'.format(threshold),\n '--type=Byte', '--NoDataValue=0',\n '--co=SPARSE_OK=YES',\n '--co=NBITS=1',\n '--quiet'\n # Could enable compression\n # --co=\"COMPRESS=LZW\"\n ]\n subprocess.run(args)\n\n subprocess.run([\n \"gdal_polygonize.py\",\n tmpfile_1,\n '-q',\n '-f', 'ESRI Shapefile',\n tmpfile_2\n ])\n\n subprocess.run([\n \"ogr2ogr\",\n '-a_srs', 'EPSG:4326',\n outfile,\n tmpfile_2\n ])\n\n subprocess.run([\"rm\", tmpfile_1])\n subprocess.run([\"rm\", tmpfile_2])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'shx')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'dbf')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'prj')])",
"def main(argv):\n\n \n\n if validate_argv(argv) is False:\n print \"Usage: mergeFiles.py <search_term>\"\n sys.exit()\n\n input_directory_name = 'data_raw'\n search_term = argv[0]\n output_file_name = search_term + '_merged.tsv'\n output_directory_name = 'merged'\n\n\n output_path = fp.set_output_file_path(output_file_name, output_directory_name) \n output = open(output_path, 'a')\n for h1 in range(3):\n for h2 in range(10):\n for m1 in range(6):\n for m2 in range(10):\n file_name = search_term + '_' + str(h1) + str(h2) + str(m1) + str(m2) + '.tsv'\n file_path = fp.get_file_path(file_name, input_directory_name)\n if fp.filename_exists(file_path):\n file = open(file_path, 'r')\n file.next()\n for line in file:\n output.write(line)\n file.close()\n output.close()",
"def on_merge(self, to_be_merged, merge_result, context):\n pass",
"def mergePolyShapefiles(input1Filename, input2Filename, mergedFilename,field_names = [cc.DEV_LAYER_ATTRIBUTE_NAME,]):\n\n input1Ds = ogr.Open(input1Filename)\n if not input1Ds:\n sys.exit(\"Unable to open input file '{0}'\".format(input1Filename))\n input1lyr = input1Ds.GetLayer()\n inp1SRS = input1lyr.GetSpatialRef()\n\n input2Ds = ogr.Open(input2Filename)\n if not input2Ds:\n sys.exit(\"Unable to open input file '{0}'\".format(input2Filename))\n\n input2lyr = input2Ds.GetLayer()\n inp2SRS = input2lyr.GetSpatialRef()\n\n # Check that files have matching SRS, as we're not reprojecting. Use MorphToESRI to overcome weird issues where\n # parameters are same but just in different positions\n inp1SRS.MorphToESRI()\n inp2SRS.MorphToESRI()\n if inp1SRS.ExportToWkt()<> inp2SRS.ExportToWkt():\n print inp1SRS.ExportToWkt()\n print inp2SRS.ExportToWkt()\n sys.exit(\"The SRS of the input files '{0}' and '{1}' do not match. Merge cannot be completed.\".format(\n input1Filename,input2Filename))\n\n # DEVTODO: Should check for matching geometry types - them we could generalize this function. For now,\n # only support Polygons\n\n shpdriver = ogr.GetDriverByName('ESRI Shapefile')\n if os.path.exists(mergedFilename):\n shpdriver.DeleteDataSource(mergedFilename)\n if os.path.exists(mergedFilename):\n sys.exit(\"Unable to delete existing Shapefile '{0}'\".format(mergedFilename))\n\n outputBufferds = shpdriver.CreateDataSource(mergedFilename)\n outputlyr = outputBufferds.CreateLayer(mergedFilename, geom_type=ogr.wkbPolygon, srs=inp1SRS)\n\n # Add input Layer Fields to the output Layer if its listed in the field_names list\n inLayerDefn = input1lyr.GetLayerDefn()\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n if fieldName not in field_names:\n continue\n outputlyr.CreateField(fieldDefn)\n fieldDefn = None\n print \"\\tCreated an Attribute '{0}' in merged shapefile '{1}'\".format(fieldName,mergedFilename)\n\n # Get the output Layer's Feature Definition\n outLayerDefn = outputlyr.GetLayerDefn()\n inputLayerDefn = input1lyr.GetLayerDefn()\n\n # Add features to the ouput Layer\n for i in range(0, input1lyr.GetFeatureCount()):\n # Get the input Feature\n inFeature = input1lyr.GetFeature(i)\n outFeature = ogr.Feature(outLayerDefn)\n outFeature.SetGeometry(inFeature.GetGeometryRef())\n\n # Add specified field values from input Layer\n for i in range(0, inputLayerDefn.GetFieldCount()):\n fieldDefn = inputLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n if fieldName not in field_names:\n continue\n\n outFeature.SetField(fieldName, inFeature.GetField(fieldName))\n\n outputlyr.CreateFeature(outFeature)\n outFeature = None\n\n inputLayerDefn = input2lyr.GetLayerDefn()\n\n for i in range(0, input2lyr.GetFeatureCount()):\n # Get the input Feature\n inFeature = input2lyr.GetFeature(i)\n\n outFeature = ogr.Feature(outLayerDefn)\n outFeature.SetGeometry(inFeature.GetGeometryRef())\n\n # Add specified field values from input Layer\n for i in range(0, inputLayerDefn.GetFieldCount()):\n fieldDefn = inputLayerDefn.GetFieldDefn(i)\n fieldName = fieldDefn.GetName()\n if fieldName not in field_names:\n continue\n\n outFeature.SetField(fieldName, inFeature.GetField(fieldName))\n\n outputlyr.CreateFeature(outFeature)\n outFeature = None\n\n outputBufferds.Destroy()\n # Create prj file\n pu.createPrjFile(mergedFilename,inp1SRS)",
"def main() -> None:\n\n parser = argparse.ArgumentParser(description=\"Merger option parser\")\n parser.add_argument(\"-f\",\n \"--files\",\n required=True,\n help=\"List of files to merge\",\n nargs=\"+\")\n parser.add_argument(\"-o\",\n \"--output\",\n required=True,\n help=\"Name of output file\")\n parser.add_argument(\"-r\",\n \"--resample\",\n required=False,\n help=\"Resample algorithm\",\n default=\"average\")\n\n args = parser.parse_args()\n\n if args.files is None:\n raise ValueError(\"ERROR: No input files passed.\")\n\n if len(args.files) == 1:\n raise ValueError(\n \"ERROR: Merger takes at least 2 files, but 1 was given: {0}\".\n format(args.files[0]))\n\n merge(args.files, output_file=args.output, resample=args.resample)",
"def svn_fs_merge(*args):\r\n return _fs.svn_fs_merge(*args)",
"def merge_wrapper(processdir, basedir, starglob, superstarglob, calibrootglob, njobs=2, invert=False):\n for glob in [starglob, superstarglob, calibrootglob]:\n assert path.dirname(glob), \\\n f\"Glob : {glob} should be/contain a subdirectory\"\n\n superstarGlobNew = get_glob_strings(superstarglob)\n calibrootGlob1, calibrootGlob2 = get_glob_strings(calibrootglob)\n superstardir = get_dir_from_glob(processdir, superstarglob)\n calibdir = get_dir_from_glob(basedir, calibrootglob)\n starglob = processdir + starglob\n\n # ssmcolfnames = converter(superstardir,\n # globstr1=superstarGlobNew,\n # globstr2=superstarGlobNew,\n # njobs=42,\n # mergecolsonly=True)\n # yecho(\"SuperStarfiles done.\")\n # tofiltercalibglob = converter(processdir,\n # globstr1=calibrootGlob1,\n # globstr2=calibrootGlob2,\n # njobs=42,\n # mergecolsonly=False)\n # yecho(\"Extracting done.\")\n tofiltercalibglob = \"./csv/*.csv\"\n ssmcolfnames = glob_and_check(\"./superstar/mergecols/*.csv\")\n\n yecho(\"Removing events.\")\n if njobs > 1:\n splitcalib = split_by_dates(tofiltercalibglob)\n splitstar = split_by_dates(starglob)\n splitss = split_by_dates(ssmcolfnames)\n # needs filename output\n assert len(splitcalib) == len(splitstar) == len(splitss), \"only works the first time when no calibfiles got moved, for everything else this needs a new function with more logic\"\n Parallel(n_jobs=njobs)\\\n (delayed(single_remove_events)(calibglob, starglob, ssglob, njobs, invert)\n for calibglob, starglob, ssglob in zip(splitcalib, splitstar, splitss))\n # filteredFiles = [f for arr in filteredFiles for f in arr]\n else:\n check_telescope_files(rootdir=None, globstr1=ssmcolfnames,\n globstr2=calibmcolfnames, replacer=(\"_Y_\", \"_I_\"))\n remover = EventRemover(tofiltercalibglob=tofiltercalibglob,\n starglob=starglob,\n superstarmcolglob=ssmcolfnames)\n remover.remove_events()\n filteredFiles = remover.outfilenames\n yecho(\"Removed events that get thrown out during image cleaning and superstar processing and wrote the merged runs to:\")\n yecho(f\"{path.basename(filteredFiles[0])}\")\n # return filteredFiles",
"def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()",
"def up(job, inputFileID1, inputFileID2, memory=sortMemory):\n with job.fileStore.writeGlobalFileStream() as (fileHandle, outputFileStoreID):\n with job.fileStore.readGlobalFileStream( inputFileID1 ) as inputFileHandle1:\n with job.fileStore.readGlobalFileStream( inputFileID2 ) as inputFileHandle2:\n merge(inputFileHandle1, inputFileHandle2, fileHandle)\n job.fileStore.logToMaster( \"Merging %s and %s to %s\"\n % (inputFileID1, inputFileID2, outputFileStoreID) )\n #Cleanup up the input files - these deletes will occur after the completion is successful. \n job.fileStore.deleteGlobalFile(inputFileID1)\n job.fileStore.deleteGlobalFile(inputFileID2)\n return outputFileStoreID",
"def merge():\n\n print(\"Starting merge thread...\\n\\n\")\n\n cmd = \"tsp \"\n for c in channels:\n port = 2000 + int(c)\n\n if c != \"1\":\n cmd += \"-P merge \\\"tsp \"\n cmd += f\"-I ip 230.2.2.2:{port}\\\" \"\n else:\n cmd += f\"-I ip 230.2.2.2:{port} \"\n \n cmd += \"-O ip --enforce-burst 230.2.2.2:2000\"\n\n tsduck = subprocess.call(\n cmd,\n shell=False,\n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT\n )",
"def main():\n try:\n merge_envs(parse_args())\n except MergeError:\n return 1",
"def hxlmerge_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):\n\n parser = make_args('Merge columns from one HXL dataset into another (similar to SQL join).')\n parser.add_argument(\n '-m',\n '--merge',\n help='HXL file or URL to merge',\n metavar='filename',\n required=True\n )\n parser.add_argument(\n '-k',\n '--keys',\n help='HXL tag(s) to use as a shared key.',\n metavar='tag,tag...',\n required=True,\n type=hxl.model.TagPattern.parse_list\n )\n parser.add_argument(\n '-t',\n '--tags',\n help='Comma-separated list of column tags to include from the merge dataset.',\n metavar='tag,tag...',\n required=True,\n type=hxl.model.TagPattern.parse_list\n )\n parser.add_argument(\n '-r',\n '--replace',\n help='Replace empty values in existing columns (when available) instead of adding new ones.',\n action='store_const',\n const=True,\n default=False\n )\n parser.add_argument(\n '-O',\n '--overwrite',\n help='Used with --replace, overwrite existing values.',\n action='store_const',\n const=True,\n default=False\n )\n add_queries_arg(parser, 'Merged data only from rows that match at least one query.')\n\n args = parser.parse_args(args)\n\n do_common_args(args)\n\n with make_source(args, stdin) as source, make_output(args, stdout) as output, hxl.input.data(args.merge, hxl.InputOptions(allow_local=True)) if args.merge else None as merge_source:\n filter = hxl.filters.MergeDataFilter(\n source, merge_source=merge_source,\n keys=args.keys, tags=args.tags, replace=args.replace, overwrite=args.overwrite,\n queries=args.query\n )\n hxl.input.write_hxl(output.output, filter, show_tags=not args.strip_tags)\n\n return EXIT_OK",
"def merge_regions(bed_files, out_bed):\n merge_all = (\"zcat {0} | \"\n \"sort -k1,1 -k2,2n | \"\n \"bedtools merge -i stdin | \"\n \"gzip -c \"\n \"> {1}\").format(' '.join(bed_files), out_bed)\n print merge_all\n os.system(merge_all)\n\n return None",
"def bed_merge(output_file, *inputfiles):\n working_dir = os.path.dirname(inputfiles[0]);\n temp_file1 = working_dir + os.sep + \"temp_dfj304jfd.txt\";\n\n #Concatenate input files\n cat_command = ['cat'];\n cat_command.extend(inputfiles);\n with open(temp_file1, 'w') as fout:\n sp.check_call(cat_command, stdout=fout);\n\n #Sort file to be merged\n temp_file2 = working_dir + os.sep + \"temp_fje094j3.txt\";\n with open(temp_file2, 'w') as fout:\n sp.check_call(['sortBed','-i',temp_file1], stdout=fout);\n\n #Merge file\n if(output_file.find(os.sep) == -1):\n output_file = working_dir + os.sep + output_file;\n\n with open(output_file, 'w') as fout:\n sp.check_call(['bedtools','merge','-i',temp_file2], stdout=fout);\n\n #Clean up temporary files\n os.remove(temp_file1);\n os.remove(temp_file2);\n\n return output_file;",
"def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,\n out_csv_filename, from_gbif=True):\n csv_filename_pairs, header = get_chunk_files(\n in_csv_filename, out_csv_filename=out_csv_filename)\n\n# in_csv_fn, out_csv_fn = csv_filename_pairs[0]\n# intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,\n# marine_data, ancillary_path, out_csv_fn, False)\n\n with ProcessPoolExecutor() as executor:\n for in_csv_fn, out_csv_fn in csv_filename_pairs:\n executor.submit(\n intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,\n marine_data, ancillary_path, out_csv_fn, from_gbif)\n\n try:\n outf = open(out_csv_filename, 'w', encoding='utf-8')\n outf.write('{}'.format(header))\n smfile_linecount = 0\n for _, small_csv_fn in csv_filename_pairs:\n curr_linecount = get_line_count(small_csv_fn) - 1\n print('Appending {} records from {}'.format(\n curr_linecount, small_csv_fn))\n # Do not count header\n smfile_linecount += (curr_linecount)\n lineno = 0\n try:\n for line in open(small_csv_fn, 'r', encoding='utf-8'):\n # Skip header in each file\n if lineno == 0:\n pass\n else:\n outf.write('{}'.format(line))\n lineno += 1\n except Exception as inner_err:\n print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))\n except Exception as outer_err:\n print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))\n finally:\n outf.close()\n\n lgfile_linecount = get_line_count(out_csv_filename) - 1\n print('Total {} of {} records written to {}'.format(\n lgfile_linecount, smfile_linecount, out_csv_filename))",
"def align_rasters(ref_raster, tar_raster, output_suffix):\n command = [\"gdalbuildvrt\", \"-te\"]\n hDataset = gdal.Open(ref_raster, gdal.GA_ReadOnly)\n if hDataset is None:\n return False\n adfGeoTransform = hDataset.GetGeoTransform(can_return_null=True)\n\n tif_file=tar_raster\n vrt_file = tif_file.replace('.tif', '.vrt')\n\n if adfGeoTransform is not None:\n dfGeoXUL = adfGeoTransform[0]\n dfGeoYUL = adfGeoTransform[3]\n dfGeoXLR = adfGeoTransform[0] + adfGeoTransform[1] * hDataset.RasterXSize + \\\n adfGeoTransform[2] * hDataset.RasterYSize\n dfGeoYLR = adfGeoTransform[3] + adfGeoTransform[4] * hDataset.RasterXSize + \\\n adfGeoTransform[5] * hDataset.RasterYSize\n xres = str(abs(adfGeoTransform[1]))\n yres = str(abs(adfGeoTransform[5]))\n\n subprocess.call(command + [str(dfGeoXUL), str(dfGeoYLR), str(dfGeoXLR),\n str(dfGeoYUL), \"-q\", \"-tr\", xres, yres,\n vrt_file, tif_file])\n\n output_file = tif_file.replace('.tif', output_suffix)\n\n print('gdal_translate -q {} {}'.format(vrt_file, output_file))\n\n cmd = 'gdal_translate -q {} {}'.format(vrt_file, output_file)\n\n #print(dfGeoXUL, dfGeoYLR, dfGeoXLR, dfGeoYUL, xres, yres)\n\n subprocess.call(cmd, shell=True)\n os.remove(vrt_file)\n\n return True\n\n else:\n\n return False",
"def processAlgorithm(self, parameters, context, feedback):\n output = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)\n\n # DO SOMETHING \n sql = ('SELECT geom FROM (SELECT (ST_Dump(ST_Intersection(T1.geom, T2.geom))).geom FROM ' \n f'{parameters[self.TABLE]} AS T1 JOIN {parameters[self.TABLE]} AS T2 '\n 'ON (ST_Intersects(T1.geom, T2.geom) AND NOT ST_Touches(T1.geom, T2.geom)) '\n f'AND T1.{parameters[self.PRIMARY_KEY]} > T2.{parameters[self.PRIMARY_KEY]}) AS sobreposicao '\n 'WHERE ST_Dimension(geom) = 2 AND ST_Area(geom) > 0.0000001') \n \n \n feedback.pushInfo(sql)\n\n find_pseudo = processing.run(\"gdal:executesql\",\n {'INPUT': parameters['INPUT'],\n 'SQL':sql,\n 'OUTPUT': output},\n context=context, feedback=feedback, is_child_algorithm=True)\n\n\n return {self.OUTPUT: find_pseudo['OUTPUT']}",
"def abort_merge():\n common.safe_git_call('merge --abort')",
"def mergefsl(log, file_list, outname):\n cmdargs = split('fslmerge -t {} {}'.format(outname, file_list))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())",
"def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True"
] | [
"0.6366613",
"0.6252653",
"0.5960626",
"0.59505725",
"0.5807357",
"0.5765494",
"0.57650673",
"0.5709829",
"0.563321",
"0.5595598",
"0.55921483",
"0.5452208",
"0.5446093",
"0.54422516",
"0.5391504",
"0.5345127",
"0.5292853",
"0.52140045",
"0.5213958",
"0.51817507",
"0.5178978",
"0.5169133",
"0.5158056",
"0.5155896",
"0.51134247",
"0.51075274",
"0.5092026",
"0.5083469",
"0.5048908",
"0.50345916"
] | 0.7390473 | 0 |
Create initial launcher with angle 45 degrees and velocity 40. win is the GraphWin to draw the launcher in. | def __init__(self, win):
# draw the base shot of the launcher
base = Circle(Point(0,0), 3)
base.setFill("red")
base.setOutline("red")
base.draw(win)
# save the window and create initial angle and velocity
self.win = win
self.angle = radians(45.0)
self.vel = 40.0
        # create initial "dummy" arrow
self.arrow = Line(Point(0,0), Point(0,0)).draw(win)
# replace it with the correct arrow
self.redraw() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, win): \r\n\r\n # draw the base shot of the launcher \r\n base = Circle(Point(0,0), 3) \r\n base.setFill('red')\r\n base.setOutline('red')\r\n base.draw(win) \r\n\r\n # save the window and create initial angle and velocity\r\n self.win = win \r\n self.angle = radians(45.0)\r\n self.vel = 40.0 \r\n\r\n # create initial 'dummy' arrow \r\n self.arrow = Line(Point(0,0), Point(0, 0)).draw(win) \r\n # replace it with the correct arrow \r\n self.redraw()",
"def __init__(self):\n\n # initialize window\n self.win = graphics.GraphWin(\"Lunar Lander Game\", 300, 500)\n \n # transform coordinates\n self.win.setCoords(0, -10, 300, 600)\n\n self.surface_polygon = self.create_surface()\n self.surface_polygon.draw(self.win)\n self.background()\n \n\n self.lander_polygon = None\n # Draws two different thrust buttons\n self.b1 = Button(graphics.Point(100, 560), 80, 20, 'Thrust')\n self.b2 = Button(graphics.Point(200, 560), 80, 20, 'No Thrust')\n self.b1.draw(self.win)\n self.b2.draw(self.win)\n \n # Draws text values for altitude, velocity, and fuel\n self.alt_num = graphics.Text(graphics.Point(50, 400), 'Altitude: ')\n self.vel_num = graphics.Text(graphics.Point(50, 450), 'Velocity: ')\n self.fuel_num = graphics.Text(graphics.Point(50, 500), 'Fuel: ')\n self.alt_num.draw(self.win)\n self.vel_num.draw(self.win)\n self.fuel_num.draw(self.win)",
"def setupNewGame(self):\r\n self.level = 1\r\n self.num_cows = 2\r\n self.num_farmers = 1\r\n self.levelHeading = Text(self.gameDisplay, 120, 425, 175, self.light_orange, \"Farm 1\")\r\n self.shield_indicator.image = self.greenShield\r\n updatedHeading = self.levelHeading\r\n self.startUX[0] = updatedHeading",
"def main():\n # background\n background = background_maker()\n\n # face\n face = face_maker()\n\n # eye\n eye_l = eye_maker()\n eye_r = eye_maker()\n\n # mouth\n mouth = mouth_maker()\n mouth_1 = GArc(60, 60, 290, 60)\n mouth_2 = GArc(60, 60, 190, 60)\n\n # nose\n nose = GOval(10, 10)\n nose.filled = True\n\n # ear\n ear_l = ear_maker()\n ear_r = ear_maker()\n ear_ll = ear2_maker()\n ear_rr = ear2_maker()\n\n # body\n body = body_maker()\n body2 = body2_maker()\n body3 = body3_maker()\n\n # label\n label = label_maker('Rilakkuma', 70)\n label2 = label_maker('Min', 10, font='Dialog')\n\n # arm\n arm_l = arm1_maker()\n arm_r = arm2_maker()\n\n # leg\n leg = leg_maker()\n leg2 = leg_maker()\n\n # show my draw\n window.add(background)\n window.add(leg, (window.width - leg.width) / 2 - body.width/3.7, (window.height - leg.height) / 2 + body.height*1.1)\n window.add(leg2, (window.width - leg2.width) / 2 + body.width / 3.7,\n (window.height - leg2.height) / 2 + body.height * 1.1)\n window.add(body, (window.width - body.width) / 2, (window.height - body.height) / 2 + face.height/1.4)\n window.add(body2, (window.width - body2.width) / 2,\n (window.height - body2.height) / 2 + face.height/1.4 + body.height/3.3)\n window.add(body3, (window.width - body3.width) / 2, (window.height - body3.height) / 2 + face.height/1.2)\n window.add(arm_l, (window.width - arm_l.width) / 2 - body.width / 2.9,\n (window.height - arm_l.height) / 2 + face.height / 1.5)\n window.add(arm_r, (window.width - arm_r.width) / 2 + body.width / 2.9,\n (window.height - arm_r.height) / 2 + face.height / 1.5)\n window.add(label, (window.width-label.width)/2, window.height/4)\n window.add(ear_l, (window.width - ear_l.width) / 2 - face.width / 2.25,\n (window.height - ear_l.height) / 2 - face.height / 3)\n window.add(ear_ll, (window.width - ear_ll.width) / 2 - face.width / 2.25,\n (window.height - ear_ll.height) / 2 - face.height / 3.5)\n window.add(ear_r, (window.width - ear_r.width) / 2 + face.width / 2.25,\n (window.height - ear_r.height) / 2 - face.height / 3)\n window.add(ear_rr, (window.width - ear_rr.width) / 2 + face.width / 2.25,\n (window.height - ear_rr.height) / 2 - face.height / 3.5)\n window.add(face, (window.width - face.width) / 2, (window.height - face.height) / 2)\n window.add(eye_l, (window.width - eye_l.width) / 2 - face.width / 5, (window.height - eye_l.height) / 2)\n window.add(eye_r, (window.width - eye_r.width) / 2 + face.width / 5, (window.height - eye_r.height) / 2)\n window.add(mouth, (window.width - mouth.width) / 2, (window.height - mouth.height) / 2 + face.height / 8)\n window.add(nose, (window.width - nose.width) / 2, (window.height - nose.height) / 2 + face.height / 12)\n window.add(mouth_1, (window.width - mouth_1.width) / 2 - face.width / 20,\n (window.height - mouth_1.height) / 2 + face.height / 11)\n window.add(mouth_2, (window.width - mouth_2.width) / 2 + face.width / 20,\n (window.height - mouth_2.height) / 2 + face.height / 11)\n window.add(label2, window.width-label2.width, window.height)\n\n # kuma2\n kuma2_color = '0xFFEEDD'\n face2 = face_maker(140, color=kuma2_color)\n\n eye2_l = eye_maker(size=15)\n eye2_r = eye_maker(size=15)\n\n mouth2 = mouth_maker(size=40)\n mouth2_1 = GArc(60, 60, 290, 60)\n mouth2_2 = GArc(60, 60, 190, 60)\n\n nose2 = GOval(8, 8)\n nose2.filled = True\n\n ear2_l = ear_maker(size=50, color=kuma2_color)\n ear2_r = ear_maker(size=50, color=kuma2_color)\n ear2_ll = ear2_maker(size=30, color='0xFFC1E0')\n ear2_rr = ear2_maker(size=30, color='0xFFC1E0')\n\n body_2 = 
body_maker(size=100, color=kuma2_color)\n body2_2 = body2_maker(size=85, color=kuma2_color)\n body3_2 = body3_maker(size=60)\n\n arm2_l = arm1_maker(size=40, color=kuma2_color)\n arm2_r = arm2_maker(size=40, color=kuma2_color)\n\n leg_2 = leg_maker(size=25, color=kuma2_color)\n leg2_2 = leg_maker(size=25, color=kuma2_color)\n\n buttons = GOval(15, 15)\n buttons.filled = True\n buttons.fill_color = 'red'\n\n window.add(leg_2, (window.width - leg_2.width) / 2 - face.width / 1.05 - body_2.width/3.3,\n (window.height - leg_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(leg2_2, (window.width - leg2_2.width) / 2 - face.width / 1.05 + body_2.width/3.3,\n (window.height - leg2_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(body_2, (window.width - body_2.width) / 2 - face.width/1.05,\n (window.height - body_2.height) / 2 + face.height / 1.4)\n window.add(body2_2, (window.width - body2_2.width) / 2 - face.width/1.05,\n (window.height - body2_2.height) / 2 + face.height / 1.4 + body_2.height / 3.3)\n window.add(body3_2, (window.width - body3_2.width) / 2 - face.width/1.05,\n (window.height - body3_2.height) / 2 + face.height / 1.2)\n window.add(arm2_l, (window.width - arm2_l.width) / 2 - face.width / 1.05 - body_2.width/2.9,\n (window.height - arm2_l.height) / 2 + face2.height / 1.06)\n window.add(arm2_r, (window.width - arm2_r.width) / 2 - face.width / 1.05 + body_2.width/2.9,\n (window.height - arm2_r.height) / 2 + face2.height / 1.06)\n window.add(ear2_l, (window.width - ear2_l.width) / 2 - face.width / 0.8,\n (window.height - ear2_l.height) / 2 - face2.height / 9)\n window.add(ear2_ll, (window.width - ear2_ll.width) / 2 - face.width / 0.8,\n (window.height - ear2_ll.height) / 2 - face2.height / 15)\n window.add(ear2_r, (window.width - ear2_r.width) / 2 - face.width / 1.5,\n (window.height - ear2_r.height) / 2 - face2.height / 9)\n window.add(ear2_rr, (window.width - ear2_rr.width) / 2 - face.width / 1.52,\n (window.height - ear2_rr.height) / 2 - face2.height / 15)\n window.add(face2, (window.width-face2.width)/2 - face.width/1.05, (window.height-face2.height)/2 + face2.height/4)\n window.add(eye2_l, (window.width - eye2_l.width) / 2 - face.width / 0.9,\n (window.height - eye2_l.height) / 2 + face2.height/4)\n window.add(eye2_r, (window.width - eye2_r.width) / 2 - face.width / 1.25,\n (window.height - eye2_r.height) / 2 + face2.height/4)\n window.add(mouth2, (window.width - mouth2.width) / 2 - face.width/1.05,\n (window.height - mouth2.height) / 2 + face2.height / 2.4)\n window.add(nose2, (window.width - nose2.width) / 2 - face.width/1.05,\n (window.height - nose2.height) / 2 + face2.height / 2.5)\n window.add(mouth2_1, (window.width - mouth2_1.width) / 2 - face.width / 1,\n (window.height - mouth2_1.height) / 2 + face2.height / 2.5)\n window.add(mouth2_2, (window.width - mouth2_2.width) / 2 - face.width / 1.1,\n (window.height - mouth2_2.height) / 2 + face2.height / 2.5)\n window.add(buttons, (window.width-buttons.width)/2 - face.width/1.05,\n (window.height-buttons.height)/2 + face.height/1.62)",
"def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow",
"def __init__(self, win, angle, velocity, height):\n \n self.proj = Projectile(angle, velocity, height)\n self.marker = Circle(Point(0,height), 3)\n self.marker.setFill(\"red\")\n self.marker.setOutline(\"red\")\n self.marker.draw(win)",
"def __init__(self,name,speed,depth_of_view,view_angle,x_coor = \"\",y_coor = \"\"):\n self.name = name\n self.speed = speed # That will the instantenous speed of the robot\n self.depth_of_view = depth_of_view # That will the instantenous depth of view of the robot\n self.view_angle = view_angle # That will the instantenous view angle of the robot\n self.type = \"Robot\" #Specift the object type\n self.x = x_coor # store the position of the robot\n self.y = y_coor # store the position of the robot\n self.kind = name #Store its kind to give the GUI",
"def __init__(self, configs, simulator, wait_time=3):\n self.configs = configs\n self.sim = simulator.sim\n self.gripper = VREP_Gripper()\n self.open()",
"def initialize_plotter(width, height, min_x, max_x, min_y, max_y):\n global x_begin, x_end, x_increment\n turtle.delay(0)\n x_begin, x_end = min_x, max_x\n turtle.setup(width=width, height=height)\n turtle.screensize(width, height)\n turtle.setworldcoordinates(min_x, min_y, max_x, max_y)\n x_increment = (max_x - min_x)/width\n turtle.hideturtle()\n turtle.pencolor('black')\n turtle.penup()\n turtle.setposition(min_x, 0)\n turtle.setheading(0)\n turtle.pendown()\n turtle.forward(max_x - min_x)\n turtle.penup()\n turtle.setposition(0, min_y)\n turtle.setheading(90)\n turtle.pendown()\n turtle.forward(max_y - min_y)",
"def main():\n draw_sun()\n draw_pavement()\n draw_building()\n martin.goto(12, 40) # lines 171, 173, and 175 move the turtle down to space out the windows on the building.\n draw_windows()\n martin.goto(12, 0)\n draw_windows()\n martin.goto(12, -40)\n draw_windows()\n draw_door()\n draw_doorknob()",
"def __init__(self, _pendown=1, gridmode=False, gridsize=50, homeX = 50 + 25 + 5, homeY = 50 + 25 + 5, canvWidth = 400, canvHeight = 200, \\\n turtleMainColor=\"#00A651\", turtleAccentColor=\"#FFF600\", speed = 5, rotspeed = 5, pencolor = 'red', penwidth=3):\n self._turtleMainColor = turtleMainColor\n self._turtleAccentColor = turtleAccentColor\n self._speed = speed\n self._rotspeed = rotspeed\n self._pendown = _pendown\n self._pencolor = pencolor\n self._penwidth = penwidth\n self._rotation = 90\n self._gridsize = gridsize\n self._gridmode = gridmode\n \n if(gridmode and homeX == 80):\n homeX = 0\n homeY = 0\n \n self._x = homeX\n self._y = homeY\n self._homeX = homeX\n self._homeY = homeY\n \n self._canvWidth = canvWidth\n self._canvHeight = canvHeight\n self._actions = []\n self._levelDataString = [] \n \n self._walls = []\n self._lava = []\n \n self._appendCurrentState();",
"def init_game():\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)",
"def Spawn(self):\n if len(Ant.antArray) < Ant.antLimit:\n Ant.antArray.append(self)\n self.display.set_at((self.x,self.y), Colors.A_Plant)\n pygame.display.update(pygame.Rect(self.x,self.y,1,1))",
"def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None",
"def __init__(self, win, angle, velocity, height): \r\n\r\n self.proj = Projectile(angle, velocity, height) \r\n self.marker = Circle(Point(0, height), 3) \r\n self.marker.setFill(\"red\")\r\n self.marker.setOutline(\"red\")\r\n self.marker.draw(win)",
"def default_door():\n X = [0.0, 0.14, 1.12, 1.26]\n Y = [0.0, 0.14, 2.24]\n Z = [-0.14, 0.14]\n V, F = True, False\n occupancy = [\n [[V], [V]],\n [[V], [F]],\n [[V], [V]]\n ]\n return w7.window(X, Y, Z, occupancy)",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def Spawn(self):\n if len(Ant.antArray) < Ant.antLimit:\n Ant.antArray.append(self)\n self.display.set_at((self.x,self.y), Colors.A_Wood)\n pygame.display.update(pygame.Rect(self.x,self.y,1,1))",
"def drawmaze(self):\n win=GraphWin(\"Perfect Maze\",600,600) \n win.setBackground(\"White\")\n scale=600/self.N #Used to generalize the size difference for the input of larger numbers. The background resolution/ grid size, N\n\n x1=scale\n y1=0\n x2=scale\n y2=scale\n\n ##VERTICAL LINES ####\n for i in range(self.N,0,-1):\n for j in range(1,self.N):\n if self.East[j][i]: #If East is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2)) #lines | |\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale #Increment causes |->|\n x2+=scale #Increment causes |->|\n y1+=scale #Used to draw two more\n y2+=scale #of the same spaced lines further down.\n x1=scale #Reset\n x2=scale #Reset\n\n\n ##HORIZONTAL LINES##\n x1=0\n y1=scale\n x2=scale\n y2=scale\n\n\n for i in range(self.N,1,-1):\n for j in range(1,self.N+1):\n if self.South[j][i]: #If South is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2))\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale\n x2+=scale\n y1+=scale\n y2+=scale\n x1=0\n x2=scale\n\n const=scale//5 #Very useful const which helps in placing circles on grid.\n x=scale//2\n y=600-scale//2\n #radius=(scale-(4*scale//self.N))/2\n radius=scale//2-(const)\n start=Point(x,y) #START POINT HERE \n circ=Circle(start,radius)\n circ.setFill(\"Red\")\n label=Text(start,\"Start\")\n label.setFill(\"Black\")\n circ.draw(win)\n label.draw(win)\n #print(self.CurrentCell)\n #Using the current cell from the finished algorithm(last place visited), a circle can be placed at that point.\n endpointx=(self.CurrentCell[0]-1)*scale +scale//2 ####MAKING END POINT X\n endpointy=600-(self.CurrentCell[1]-1)*scale-scale//2 ####MAKING END POINT Y\n endpoint=Point(endpointx,endpointy)\n circ2=Circle(endpoint,radius)\n circ2.setFill(\"White\")\n label2=Text(endpoint,\"End\")\n circ2.draw(win)\n label2.draw(win)\n \n ###############CREATE KEY########################\n \n \n keypointx=(self.MazeKey[0]-1)*scale +scale//2 ####MAKING END POINT X\n keypointy=600-(self.MazeKey[1]-1)*scale-scale//2 ####MAKING END POINT Y\n keypoint=Point(keypointx,keypointy)\n circ3=Circle(keypoint,radius)\n circ3.setFill(\"Blue\")\n label3=Text(keypoint,\"Key\")\n circ3.draw(win)\n label3.draw(win)\n pathcol=\"Yellow\"\n##\n\n \n for i in range(1,len(self.EntirePath)): \n pathpointx=(self.EntirePath[i][0]-1)*scale +scale//2 ####MAKING END POINT X\n pathpointy=600-(self.EntirePath[i][1]-1)*scale-scale//2 ####MAKING END POINT Y\n pathpoint=Point(pathpointx,pathpointy)\n drawpath=Circle(pathpoint,radius)\n drawpath.setFill(pathcol)\n if self.EntirePath[i]==self.KeyPath[-1]:\n pathcol=\"Violet\"\n label4=Text(keypoint,\"Key\")\n label4.draw(win) \n drawpath.draw(win)\n drawpath.setWidth(1)\n sleep(0.1)\n \n #drawpath.draw(win)\n \n label5=Text(endpoint,\"Maze Solved \")\n label5.draw(win)\n circ4=Circle(start,radius)\n circ4.setFill(\"Red\")\n circ4.draw(win) \n label6=Text(start,\"Start \")\n label6.draw(win)",
"def __init__(self, root):\n self.app=root\n self.app.geometry('800x500')\n self.app.title(\"Bryce Streeper: Asset Allocation Visual \")\n self.makeTitle()\n self.makeGraph()\n self.makeSliders()\n self.update()",
"def create_screen(self, width, height):",
"def Spawn(self):\n if len(Ant.antArray) < Ant.antLimit:\n Ant.antArray.append(self)\n self.facing = random.randint(0,3)\n self.display.set_at((self.x,self.y), Colors.A_Zombie)\n pygame.display.update(pygame.Rect(self.x,self.y,1,1))",
"def create_food(self):\n self.penup()\n self.shape(\"circle\")\n self.color(\"green\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Food {self.x_cordinates} and {self.y_cordinates}\")\n # self.stamp()",
"def create_super_ball():\n super_balls.append(gen_super_ball())\n generate_velocity(super_balls)",
"def __init__(self):\n self.circle=visual.Circle(win,radius=.5, edges=32, fillColor='white') \n self.circle2=visual.Circle(win,radius=.1, edges=32, fillColor='white') \n self.linev = visual.Line(win, start=(0,.8), end=(0,-.8), lineWidth=6, lineColor='black') \n self.lineh = visual.Line(win, start=(.8,0), end=(-.8,0), lineWidth=6, lineColor='black') \n \n self.components = [self.circle, self.circle2, self.linev, self.lineh]",
"def Spawn(self):\n if len(Ant.antArray) < Ant.antLimit:\n Ant.antArray.append(self)\n self.display.set_at((self.x,self.y), Colors.A_black)\n pygame.display.update(pygame.Rect(self.x,self.y,1,1))",
"def __init__(self):\n self.size = 16\n self.color = COLOR\n self.pos = self.spawn()",
"def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10",
"def Spawn(self):\n if len(Ant.antArray) < Ant.antLimit:\n Ant.antArray.append(self)\n self.display.set_at((self.x,self.y), Colors.A_Water)\n pygame.display.update(pygame.Rect(self.x,self.y,1,1))",
"def initialise_screen(self):\n self.objects.append(\n pgzero.actor.Actor('screen_background', topleft=(-1, 0))\n )\n self.vessel = gauge.Gauge(\n name='vessel',\n foreground='vessel_front',\n back_colour=S['vessel-back-colour'],\n front_colour=S['vessel-water-colour'],\n size=S['vessel-size'],\n value=20,\n orientation=gauge.Gauge.VERTICAL,\n bar_offset=S['vessel-bar-offset'],\n )\n self.vessel.pos = S['vessel-position']\n self.objects.append(self.vessel)\n #\n # The panels showing the individual people\n self.health_panels = {}\n for idx, name in enumerate('abcde'):\n panel = healthpanel.HealthPanel(name, self)\n panel.pos = (S['panel-initial-x'] + idx * S['panel-dx'], S['panel-initial-y'])\n self.objects.append(panel)\n self.health_panels[name] = panel\n #\n self.tabbed = tabbed.Tabbed()\n self.objects.append(self.tabbed)\n #\n self.clock = game.clock.Clock('clock', self)\n self.clock.pos = S['clock-pos']\n self.objects.append(self.clock)\n self.end_of_day = None\n #\n self.awaiting_conversations = set()\n self.deaths = {}"
] | [
"0.73945683",
"0.6352571",
"0.5805394",
"0.579472",
"0.5572521",
"0.55596024",
"0.5541648",
"0.5446335",
"0.54389066",
"0.5437201",
"0.5429614",
"0.54086167",
"0.54053193",
"0.5386864",
"0.5362621",
"0.53455466",
"0.5335464",
"0.53260404",
"0.52860785",
"0.52791184",
"0.527336",
"0.5273013",
"0.5262474",
"0.52597487",
"0.5243182",
"0.5236865",
"0.52352434",
"0.5231447",
"0.5229278",
"0.52283776"
] | 0.7478532 | 0 |
undraw the arrow and draw a new one for the current values of angle and velocity. | def redraw(self):
self.arrow.undraw()
pt2 = Point(self.vel*cos(self.angle), self.vel*sin(self.angle))
self.arrow = Line(Point(0,0), pt2).draw(self.win)
self.arrow.setArrow("last")
self.arrow.setWidth(3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redraw(self): \r\n\r\n self.arrow.undraw() \r\n pt2 = Point(self.vel*cos(self.angle), self.vel*sin(self.angle))\r\n self.arrow = Line(Point(0,0), pt2).draw(self.win) \r\n self.arrow.setArrow('last')\r\n self.arrow.setWidth(3)",
"def get_quiver_arrows(self):\n dif_x = [i - j for i, j in zip(self.end_x, self.x)]\n dif_y = [i - j for i, j in zip(self.end_y, self.y)]\n\n # Get barb lengths(default arrow length = 30% barb length)\n barb_len = [None] * len(self.x)\n for index in range(len(barb_len)):\n barb_len[index] = math.hypot(dif_x[index] / self.scaleratio, dif_y[index])\n\n # Make arrow lengths\n arrow_len = [None] * len(self.x)\n arrow_len = [i * self.arrow_scale for i in barb_len]\n\n # Get barb angles\n barb_ang = [None] * len(self.x)\n for index in range(len(barb_ang)):\n barb_ang[index] = math.atan2(dif_y[index], dif_x[index] / self.scaleratio)\n\n # Set angles to create arrow\n ang1 = [i + self.angle for i in barb_ang]\n ang2 = [i - self.angle for i in barb_ang]\n\n cos_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n cos_ang1[index] = math.cos(ang1[index])\n seg1_x = [i * j for i, j in zip(arrow_len, cos_ang1)]\n\n sin_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n sin_ang1[index] = math.sin(ang1[index])\n seg1_y = [i * j for i, j in zip(arrow_len, sin_ang1)]\n\n cos_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n cos_ang2[index] = math.cos(ang2[index])\n seg2_x = [i * j for i, j in zip(arrow_len, cos_ang2)]\n\n sin_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n sin_ang2[index] = math.sin(ang2[index])\n seg2_y = [i * j for i, j in zip(arrow_len, sin_ang2)]\n\n # Set coordinates to create arrow\n for index in range(len(self.end_x)):\n point1_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg1_x)]\n point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]\n point2_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg2_x)]\n point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]\n\n # Combine lists to create arrow\n empty = [None] * len(self.end_x)\n arrow_x = utils.flatten(zip(point1_x, self.end_x, point2_x, empty))\n arrow_y = utils.flatten(zip(point1_y, self.end_y, point2_y, empty))\n return arrow_x, arrow_y",
"def _draw_arrow_head(\n self, ra: Quantity, dec: Quantity, dra: Quantity, ddec: Quantity\n ) -> None:\n\n h = 0.002 * u.deg\n w = 0.0013 * u.deg\n ra_correction = abs(np.cos(dec))\n v_x, v_y = dra * ra_correction, ddec\n length = np.sqrt(v_x ** 2 + v_y ** 2)\n v_x, v_y = (\n v_x.to_value(u.deg) / length.to_value(u.deg),\n v_y.to_value(u.deg) / length.to_value(u.deg),\n ) # v is normalised and points in the direction of the arrow\n u_x, u_y = -v_y, v_x # u is normalised and orthogonal to v\n dx_1 = (-h * v_x + w * u_x) / ra_correction\n dy_1 = -h * v_y + w * u_y\n dx_2 = (-h * v_x - w * u_x) / ra_correction\n dy_2 = -h * v_y - w * u_y\n\n coords = np.array(\n [\n [\n (ra + dx_1).to_value(u.deg),\n ra.to_value(u.deg),\n (ra + dx_2).to_value(u.deg),\n ],\n [\n (dec + dy_1).to_value(u.deg),\n dec.to_value(u.deg),\n (dec + dy_2).to_value(u.deg),\n ],\n ]\n )\n self.plot.show_lines([coords], color=\"b\", linewidth=1, alpha=1)",
"def draw(self, ctx):\n\n # trigonometric calculations of the arrow head for the two bottom corner points\n arrow_x0 = int(self._position[0] + cos(self._angle - pi - self._width / 2) * self._length)\n arrow_y0 = int(self._position[1] + sin(self._angle - pi - self._width / 2) * self._length)\n arrow_x1 = int(self._position[0] + cos(self._angle + pi + self._width / 2) * self._length)\n arrow_y1 = int(self._position[1] + sin(self._angle + pi + self._width / 2) * self._length)\n\n # define the properties of the arrow head to the GraphicsContext object\n ctx.set_source_rgb(*self._color)\n r = 0\n g = 0\n b = 0\n if self._color[0] != 0:\n r = 1\n if self._color[1] != 0:\n g = 1\n if self._color[2] != 0:\n b = 1\n ctx.set_source_rgba(r, g, b, 0.60)\n\n # draw arrow head\n ctx.move_to(arrow_x0, arrow_y0)\n ctx.line_to(*self._position)\n ctx.line_to(arrow_x1, arrow_y1)\n ctx.line_to(arrow_x0, arrow_y0)\n\n # finalize drawing\n ctx.close_path()\n ctx.fill_preserve()\n ctx.stroke()",
"def draw_arrow(dc, fr, to, tail_angle, tail_length):\r\n end_x, end_y = to[0], to[1]\r\n vec = -(to - fr)\r\n vec = vec.normalized()\r\n tail_1 = vec.rotated(tail_angle) * tail_length\r\n tail_2 = vec.rotated(-tail_angle) * tail_length\r\n dc.DrawLine(end_x, end_y, end_x+tail_1[0], end_y+tail_1[1])\r\n dc.DrawLine(end_x, end_y, end_x+tail_2[0], end_y+tail_2[1])",
"def _draw_arrow(event, x, y, flags, params):\n global img, source_img, state\n global p1, p2\n if event == cv2.EVENT_LBUTTONDOWN:\n state = True\n p1 = (x, y)\n img = source_img.copy()\n elif event == cv2.EVENT_LBUTTONUP:\n state = False\n p2 = (x, y)\n img = source_img.copy()\n cv2.arrowedLine(img, p1, p2, DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)\n cv2.arrowedLine(img, p2, p1, DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)\n elif event == cv2.EVENT_MOUSEMOVE:\n if state:\n img = source_img.copy()\n cv2.arrowedLine(img, p1, (x, y), DrawingShapeUtils.COLOR,\n DrawingShapeUtils.LINE_THICKNESS)\n cv2.arrowedLine(img, (x, y), p1, DrawingShapeUtils.COLOR,\n DrawingShapeUtils.LINE_THICKNESS)",
"def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)",
"def arrowhead(cls, base = 20 / 3 ** 0.5, height = 10):\n svg = SVG()\n b2, h = base / 2, height\n svg.path([\"M 0\", -b2, \"L\", h, \"0 L 0\", b2, \"z\", ])\n return svg",
"def arrowhead(cls, base = 20 / 3 ** 0.5, height = 10):\n svg = SVG()\n b2, h = base / 2, height\n svg.path([\"M 0\", -b2, \"L\", h, \"0 L 0\", b2, \"z\", ])\n return svg",
"def draw():\n ant.move(aim)\n ant.x = wrap(ant.x)\n ant.y = wrap(ant.y)\n\n aim.move(random() - 0.5)\n aim.rotate(random() * 10 - 5)\n\n clear()\n goto(ant.x, ant.y)\n dot(4)\n\n ontimer(draw, 100)",
"def draw_arrows(self):\n for arrow in self.arrows:\n arrow.draw(self)",
"def draw_arrow(image, x, y, options, forward):\n width, height = size_of_arrow(options)\n line_pos = height / 2\n indent = options.raildraw_arrow_indent * width\n if forward:\n image.move_to(x, y + line_pos)\n image.line_to(x + indent, y + line_pos)\n image.stroke()\n image.move_to(x, y)\n image.line_to(x + width, y + line_pos)\n image.line_to(x, y + height)\n image.line_to(x + indent, y + line_pos)\n else:\n image.move_to(x + width, y + line_pos)\n image.line_to(x + (width - indent), y + line_pos)\n image.stroke()\n image.move_to(x + width, y)\n image.line_to(x, y + line_pos)\n image.line_to(x + width, y + height)\n image.line_to(x + (width - indent), y + line_pos)\n image.close_path()\n image.fill()",
"def erase(self):\r\n self.in_arrow = None\r\n self.out_arrow = None",
"def update_position(self):\n p1, p2 = connection_points_between_figure_elements(self.vertex1,\n self.vertex2)\n self.set_xdata((p1.x, p2.x))\n self.set_ydata((p1.y, p2.y))\n self.arrow.remove()\n self.arrow = create_directional_arrow(self)\n self.axes.add_patch(self.arrow)",
"def DrawArrow(self, dc):\r\n\r\n rect = self.GetClientRect()\r\n point = wx.Point()\r\n\r\n point.x = (rect.GetLeft() + rect.GetRight()) / 2\r\n point.y = (rect.GetTop() + rect.GetBottom()) / 2\r\n rx, ry = wx.Size(), wx.Size()\r\n \r\n if self._direction == wx.TOP:\r\n rx = wx.Size(1, 0)\r\n ry = wx.Size(0, 1)\r\n\r\n elif self._direction == wx.LEFT:\r\n rx = wx.Size(0, -1)\r\n ry = wx.Size(1, 0)\r\n\r\n elif self._direction == wx.RIGHT:\r\n rx = wx.Size(0, 1)\r\n ry = wx.Size(-1, 0)\r\n\r\n elif self._direction == wx.BOTTOM:\r\n rx = wx.Size(-1, 0)\r\n ry = wx.Size(0, -1) \r\n\r\n point.x += ry.x*3\r\n point.y += ry.y*3\r\n\r\n dc.SetPen(wx.Pen(colourIconArrow))\r\n\r\n for i in xrange(4):\r\n pt1 = wx.Point(point.x - rx.x*i, point.y - rx.y*i)\r\n pt2 = wx.Point(point.x + rx.x*(i+1), point.y + rx.y*(i+1))\r\n dc.DrawLinePoint(pt1, pt2)\r\n point.x += ry.x\r\n point.y += ry.y",
"def arrowhead(head,headwidth,angle):\r\n w(\"%% begin arrowhead\")\r\n holdhead = apoint(head)\r\n head = [0,0]\r\n tip = rapoint([head[0] + headwidth,head[1]])\r\n p1 = rapoint([head[0] - headwidth,head[1] + headwidth])\r\n p2 = rapoint([head[0] - headwidth,head[1] - headwidth])\r\n c1 = rapoint([head[0],head[1]-headwidth/2])\r\n c2 = rapoint([head[0],head[1]+headwidth/2])\r\n w(\"gsave\")\r\n w(\"%d %d translate\" % (holdhead[0],holdhead[1]))\r\n w(\"%d rotate\" % angle)\r\n w(\"%d %d moveto\" % (p1[0],p1[1]))\r\n w(\"%d %d lineto\" % (tip[0],tip[1]))\r\n w(\"%d %d lineto\" % (p2[0],p2[1]))\r\n w(\"%d %d %d %d %d %d curveto\"% (c1[0],c1[1],c2[0],c2[1],p1[0],p1[1]))\r\n w(\"closepath\")\r\n w(\"fill\")\r\n w(\"grestore\")\r\n w(\"%% end arrowhead\")",
"def __init__(self, win): \r\n\r\n # draw the base shot of the launcher \r\n base = Circle(Point(0,0), 3) \r\n base.setFill('red')\r\n base.setOutline('red')\r\n base.draw(win) \r\n\r\n # save the window and create initial angle and velocity\r\n self.win = win \r\n self.angle = radians(45.0)\r\n self.vel = 40.0 \r\n\r\n # create initial 'dummy' arrow \r\n self.arrow = Line(Point(0,0), Point(0, 0)).draw(win) \r\n # replace it with the correct arrow \r\n self.redraw()",
"def do_altangle(self):\n nave = 10000\n x, y, z, angle = cbp.phidget.main(nave)\n current_angle = angle\n #print(current_angle)\n self.altangle = current_angle\n return current_angle",
"def move_forward(self,length,draw=True):\r\n new_x = self.x + length * math.cos(math.radians(self.angle))\r\n new_y = self.y + length * math.sin(math.radians(self.angle))\r\n self.draw_tool.line(((self.x,self.y),(new_x,new_y)), fill=(0,0,0),width=2)\r\n self.x = new_x\r\n self.y = new_y",
"def draw_arrow(axes, startx, starty, orient, arrow_len=5.0, color='black', lw=2.0):\n xy = (startx, starty)\n dxy = (np.cos(orient) * arrow_len, np.sin(orient) * arrow_len)\n xytext = tuple(map(sum, zip(xy, dxy)))\n axes.annotate(\n \"\",\n xy=xy,\n xytext=xytext,\n arrowprops=dict(arrowstyle=\"<-\", lw=lw),\n color=color,\n )",
"def render_arrow(arrow):\r\n if arrow == '->':\r\n return u'\\u2192'\r\n if arrow == '<->':\r\n return u'\\u2194'\r\n\r\n # this won't be reached unless we add more arrow types, but keep it to avoid explosions when\r\n # that happens.\r\n return arrow",
"def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360",
"def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])",
"def update(self, mouse_pos):\n angle = get_angle(self.base_rect.center, mouse_pos)\n if 1.75 * pi >= angle >= 1.5 * pi:\n angle = 1.75 * pi\n elif 1.25 * pi <= angle < 1.5 * pi:\n angle = 1.25 * pi\n self.angle = angle\n\n rotated_barrel = pg.transform.rotate(self.barrel, degrees(self.angle))\n barrel_rect = rotated_barrel.get_rect()\n surf = pg.Surface(barrel_rect.size)\n surf.fill((255, 0, 255))\n surf.set_colorkey((255, 0, 255))\n rect = pg.Rect((0, 0), self.base_rect.size)\n rect.center = barrel_rect.center\n surf.blit(rotated_barrel, (0, 0))\n surf.blit(self.turret_base, rect)\n self.image = surf\n self.rect = self.image.get_rect(center=self.base_rect.center)\n self.barrel_rect = barrel_rect\n self.barrel_rect.center = self.base_rect.center",
"def add_arrow(self, arrow):\n self.arrows.append(arrow)",
"def zero(self):\n\t\tself.angle = 0.0\n\t\tself.draw()\n\t\ttime.sleep(self.delay)",
"def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)",
"def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)",
"def fire(self, angle):\r\n #convert the angle to the slope multiply by bullet speed for velocity\r\n self.velocity.dy = math.sin(math.radians(angle)) * BULLET_SPEED\r\n #convert the angle to the slope multiply by bullet speed for velocity\r\n self.velocity.dx = math.cos(math.radians(angle)) * BULLET_SPEED",
"def change_angle(self, up_or_down):\n self.angle += up_or_down * math.pi / 180"
] | [
"0.77334005",
"0.65644336",
"0.6287359",
"0.6213265",
"0.61974335",
"0.6153086",
"0.5998839",
"0.5908423",
"0.5908423",
"0.59016716",
"0.5891196",
"0.58837223",
"0.5872633",
"0.5846972",
"0.58233887",
"0.5820985",
"0.5800393",
"0.57933944",
"0.5788004",
"0.5777631",
"0.57577807",
"0.57451636",
"0.5723507",
"0.57033896",
"0.5700273",
"0.5694485",
"0.5687302",
"0.5655517",
"0.56412506",
"0.5565789"
] | 0.7731802 | 1 |
change angle by amt degrees | def adjAngle(self, amt):
self.angle = self.angle+radians(amt)
self.redraw() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def adjAngle(self, amt): \r\n\r\n self.angle = self.angle + radians(amt)\r\n self.redraw()",
"def angle(self) -> float:\n ...",
"def angle(self, angle_deg) -> None:\n ...",
"def rotateDegrees(angle):\n rotate(angle *2*math.pi / 360)",
"def angle(self) -> int:",
"def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])",
"def angle(self, dangle_deg: float) -> None:\n ...",
"def angle(self, angle: int, time: int = 0, /) -> None:",
"def setAngle(self, angle):\n self._angle = (angle + math.pi / 2) % math.pi - math.pi / 2\n # self._angle = angle % (2*math.pi)",
"def rotate(self,amount):\n self.angle += amount\n if self.drawn == True:\n self.draw()",
"def angle(z):",
"def angle(self, value):\n if value is None:\n value = 0.0\n\n self.__angle = value",
"def set_angle(self, ang):\n if ang < 0:\n ang = 0\n elif ang > 180:\n ang = 180\n dutyCycle = 5 + (ang*5/180)\n self.servoPort.ChangeDutyCycle(dutyCycle)",
"def angle(self):\n return 0",
"def change_angle(self, new_angle):\r\n self.angle = new_angle",
"def setAngle(self,angle = 2.5):\n pass",
"def srotate(self, angle):\n\n self.angle = self.angle + angle",
"def get_angle(n):\n return n % 360 if n > 360 else (n * 180) / PI",
"def setAngle(self,a):\n self.angle = a\n if self.drawn == True:\n self.draw()",
"def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)",
"def value_to_angle(value):\n return ...",
"def adjustAngle(self, angle):\n\t\tif self.timeout <= 0:\n\t\t\tself.angle = (self.angle + angle) % 360",
"def get_angle(self, angle_):\n return self.two_pi * angle_",
"def wheel_angle(self, angle):\n self.angle = angle",
"def set_angle(self, angle):\n return self.bot_client.send_command(_Command.SetAngle, angle)",
"def angle_modulo_360(angle):\n if angle > 180.0:\n return angle - 360.0\n elif angle < -180.0:\n return angle + 360.0\n else:\n return angle",
"def set_angle(self, angle):\n new_angle = angle\n\n # Declaring conversion constants\n angle_min = 0\n angle_max = 180\n angle_range = angle_max - angle_min\n dc_range = self._dc_max - self._dc_min\n\n # Enforcing angle range\n if new_angle > angle_max:\n new_angle = angle_max\n elif new_angle < angle_min:\n new_angle = angle_min\n\n # Scaling input angle to an appropriate duty cycle\n duty_cycle = ((dc_range / angle_range) * (new_angle - angle_min)) + self._dc_min\n\n self._servo_pwm.changeDutyCycle(duty_cycle)",
"def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)",
"def set_angle(self, angle=0.0):\n self.angle = angle",
"def positive_degrees(angle):\n return (angle + 360) % 360"
] | [
"0.8125653",
"0.7635241",
"0.7557885",
"0.75281644",
"0.751763",
"0.7443927",
"0.73215574",
"0.7298224",
"0.72973174",
"0.7293998",
"0.7246493",
"0.72278506",
"0.7201105",
"0.7193929",
"0.7187989",
"0.7185721",
"0.7136344",
"0.71154463",
"0.71035147",
"0.7098545",
"0.7091846",
"0.70871514",
"0.7062984",
"0.70335495",
"0.6983922",
"0.6974767",
"0.6972495",
"0.69700414",
"0.6951613",
"0.69510543"
] | 0.79394275 | 1 |
win is the GraphWin to display the shot, angle, velocity, and height are initial projectile parameters. | def __init__(self, win, angle, velocity, height):
self.proj = Projectile(angle, velocity, height)
self.marker = Circle(Point(0,height), 3)
self.marker.setFill("red")
self.marker.setOutline("red")
self.marker.draw(win) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, win):\n \n # draw the base shot of the launcher\n base = Circle(Point(0,0), 3)\n base.setFill(\"red\")\n base.setOutline(\"red\")\n base.draw(win)\n\n # save the window and create initial angle and velocity\n self.win = win\n self.angle = radians(45.0)\n self.vel = 40.0\n \n # create inital \"dummy\" arrow\n self.arrow = Line(Point(0,0), Point(0,0)).draw(win)\n # replace it with the correct arrow\n self.redraw()",
"def drawWindow(win, birds, pipes, base, score, gen):\n # Put on the background first\n win.blit(BACKGROUND_IMG, (0, 0))\n\n # Draw the pipes on screen\n for pipe in pipes:\n pipe.draw(win)\n\n # Show the score\n text = SCORE_FONT.render(\"Score: \"+str(score),1,(255,0,0,0))\n win.blit(text, (WINDOW_WIDTH - 10 -text.get_width(), 10 ))\n\n # Show the current generation\n text = SCORE_FONT.render(\"Generation: \"+str(gen),1,(255,0,0,0))\n win.blit(text, ( 10, 10 ))\n # Show no of birds that are currently alive\n text = SCORE_FONT.render(\"Alive: \"+str(len(birds)),1,(255,0,0,0))\n win.blit(text, ( 10, 30 ))\n\n # Show name of creator\n tag = SCORE_FONT.render(\"rgo_209\",1,(0,0,0,0))\n win.blit(tag, ( 10, 550 ))\n\n\n # Draw the base on screen\n base.draw(win)\n\n # Draw all the birds that are currently alive\n for bird in birds:\n bird.draw(win)\n\n # Update the screen\n pygame.display.update()",
"def __init__(self, win): \r\n\r\n # draw the base shot of the launcher \r\n base = Circle(Point(0,0), 3) \r\n base.setFill('red')\r\n base.setOutline('red')\r\n base.draw(win) \r\n\r\n # save the window and create initial angle and velocity\r\n self.win = win \r\n self.angle = radians(45.0)\r\n self.vel = 40.0 \r\n\r\n # create initial 'dummy' arrow \r\n self.arrow = Line(Point(0,0), Point(0, 0)).draw(win) \r\n # replace it with the correct arrow \r\n self.redraw()",
"def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()",
"def __init__(self, win, angle, velocity, height): \r\n\r\n self.proj = Projectile(angle, velocity, height) \r\n self.marker = Circle(Point(0, height), 3) \r\n self.marker.setFill(\"red\")\r\n self.marker.setOutline(\"red\")\r\n self.marker.draw(win)",
"def draw(self, win):\n # draw grid\n gap = self.width // 9\n for i in range(self.rows + 1):\n if i % 3 == 0 and i != 0:\n thick = 4\n else:\n thick = 1\n pygame.draw.line(win, (0, 0, 0), (0, i * gap), (self.width, i * gap), thick)\n pygame.draw.line(win, (0, 0, 0), (i * gap, 0), (i * gap, self.width), thick)\n\n # draw cubes\n for i in range(self.rows):\n for j in range(self.cols):\n self.cubes[i][j].draw(win)",
"def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n (widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n 
(widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = 
cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n 
(widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = (.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n 
widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], \"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n 
(widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n (widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, 
en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] 
= cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n (widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", \"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? \")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()",
"def __init__(self, x, y, w, h, r, c, gamewin):\n\n super().__init__(x, y, w, h)\n \n self.gamewin = gamewin\n \n self.tiles = list()\n self.r = r\n self.c = c\n \n # Assumes equal w & h, r & c\n self.dim = round(w / c)\n \n self.gentiles()\n \n self.end()",
"def __init__(self, x, y, w, h, gamewin, x_ind, y_ind):\n\n super().__init__(x, y, w, h)\n\n self.gamewin = gamewin\n self.x_ind = x_ind\n self.y_ind = y_ind\n \n # Customize visuals\n self.box(FL_BORDER_BOX)\n self.color(FL_BLUE)\n self.clear_visible_focus()\n\n # Flags for drawing\n self.hit = False\n self.miss = False",
"def draw(self, graphwin):\n\n if self.canvas and not self.canvas.isClosed(): raise GraphicsError, OBJ_ALREADY_DRAWN\n if graphwin.isClosed(): raise GraphicsError, \"Can't draw to closed window\"\n self.canvas = graphwin\n #self.id = self._draw(graphwin, self.config)\n self.id = _tkCall(self._draw, graphwin, self.config)\n if graphwin.autoflush:\n #_root.update()\n _tkCall(_root.update)",
"def render(self, mode='human'):\n screen_width = 800\n screen_height = 550\n\n # Width is one column for each variable\n n_sect = 7\n world_width = n_sect*2 # X axis is just pixels\n \n buff_axis = cfg['buff_axis']\n #bottom of the screen scales to the input/output range of values\n world_height_bottom = np.max(self.maxes)+buff_axis\n \n # Top is for counting steps\n world_height_top = 100\n\n #Split the screen:\n world_top = .3\n world_bottom = 1-world_top\n screen_height_bottom = world_bottom*screen_height\n\n #Set where to draw the steps axis\n axes_line1 = screen_height*(world_bottom + .2)\n\n # Scale the pixels in the screen:\n scalex = screen_width/world_width\n scaley_bottom= screen_height_bottom/world_height_bottom\n\n # Some adjustments to move some objects up/ right\n move_oval = -scalex*.2\n move_up= scaley_bottom * buff_axis*.5\n\n #set sizes of shapes:\n self.oval_length = 25.0\n self.oval_width = 50.0\n self.rect_width = 70.0\n self.rect_height = 5.0 \n\n #Step plot:\n scalestep = screen_width/cfg['scalestep']\n\n #color shades:\n light_col = .7\n dark_col = 1\n c11 = .6\n c22 = .8\n c33 = 1\n\n if self.viewer is None:\n #TO DO: find an alternative to copy-paste to generate multiple similar shapes\n self.viewer = rendering.Viewer(screen_width, screen_height)\n \n #Input states:\n\n #the temp action\n self.temptrans1 = self.make_oval(0,0,light_col)\n self.temptrans2 = self.make_oval(0,0,dark_col)\n #flow action:\n self.flowtrans1 = self.make_oval(light_col,0,light_col)\n self.flowtrans2 = self.make_oval(dark_col,0,dark_col)\n\n #output states:\n #out1:\n #the gauge is a rectangle \n self.outgauge1 = self.make_rect(0,c33,0)\n #goal is red rectangle\n self.outgoal1= self.make_rect(c33,0,0)\n \n #out2:\n #the gauge is a rectangle \n self.outgauge2 = self.make_rect(0,c22,0)\n #goal is red rectangle\n self.outgoal2= self.make_rect(c22,0,0)\n\n #out3:\n #the gauge is a rectangle \n self.outgauge3 = self.make_rect(0,c11,0)\n #goal is red rectangle\n self.outgoal3 = self.make_rect(c11,0,0)\n\n #lines on which \"controls\" sit\n for l in range(n_sect): \n self.make_line(scalex*((l*2)+1),0, scalex*((l*2)+1),screen_height*world_bottom)\n\n # Line separating the top and bottom of the screen. 
\n self.make_line(0,world_bottom*screen_height,screen_width,world_bottom*screen_height)\n # Step # axis.\n self.make_line(scalex*1.5,axes_line1,screen_width-scalex*1,axes_line1)\n\n # The dot tracking the step #\n dot = rendering.make_circle(self.oval_length)\n self.dottrans = rendering.Transform()\n dot.add_attr(self.dottrans)\n dot.set_color(0,0,0)\n self.viewer.add_geom(dot)\n\n #labels: \n num = 0\n label_buff_y = 1.07\n label_buff_x = .2\n img_scale = .5\n img_wid = 179 *img_scale\n img_height = 124 * img_scale\n\n for label in self.labels:\n pth = (self.label_dir+label+'.png')\n self.txt = rendering.Image(pth,img_wid,img_height)\n locx = (num*2)+1\n self.txtis = rendering.Transform(translation=(scalex*locx +locx* label_buff_x,world_bottom*screen_height*label_buff_y))\n self.txt.add_attr(self.txtis)\n self.viewer.add_geom(self.txt)\n num = num+1\n\n #step label\n pth = (self.label_dir+'Step.png')\n self.txt = rendering.Image(pth,img_wid,img_height)\n self.txtis = rendering.Transform(translation=(scalex*.5,axes_line1))\n self.txt.add_attr(self.txtis)\n self.viewer.add_geom(self.txt)\n\n if self.state is None: return None\n\n x = self.state\n\n # 4 ins:\n self.flowtrans1.set_translation(move_oval+scalex*1,move_up+scaley_bottom*x[0])\n self.temptrans1.set_translation(move_oval+scalex*3,move_up+scaley_bottom*x[1])\n self.flowtrans2.set_translation(move_oval+scalex*5,move_up+scaley_bottom*x[2])\n self.temptrans2.set_translation(move_oval+scalex*7,move_up+scaley_bottom*x[3])\n\n # 3 outs: current & goal:\n self.outgauge1.set_translation(scalex*9,move_up+scaley_bottom*x[4])\n self.outgoal1.set_translation(scalex*9,move_up+scaley_bottom*x[7])\n self.outgauge2.set_translation(scalex*11,move_up+scaley_bottom*x[5])\n self.outgoal2.set_translation(scalex*11,move_up+scaley_bottom*x[8])\n self.outgauge3.set_translation(scalex*13,move_up+scaley_bottom*x[6])\n self.outgoal3.set_translation(scalex*13,move_up+scaley_bottom*x[9])\n\n #step info:\n self.dottrans.set_translation(scalex*1.5 + self.steps*scalestep, axes_line1)\n done_grow = .5*self.done\n self.dottrans.set_scale(1+done_grow,1+done_grow) #expand size when done\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')",
"def draw(self, screen, winWidth, winHeight):\n\t\t#draws player\n\t\tself.player.draw(screen)\n\t\t#draws circle\n\t\tfor circle in self.circles:\n\t\t# to make the circle fall constant\n\t\t\tif circle.pos[1] > winHeight + circle.radius: \n\t\t\t\tcircle.radius = randrange(10,25)\n\t\t\t\tcircle.color = getRandColor()\n\t\t\t\ta = randrange(0,winWidth-2*(circle.radius))\n\t\t\t\tb = randrange(-2*circle.radius, 0)\n\t\t\t\tcircle.pos[0] = a\n\t\t\t\tcircle.pos[1] = b\n\t\t\t\tcircle.speed = (randrange(1,5))\n\t\t\tcircle.draw(screen)\n\t\t#draws special object\n\t\tfor iM in self.special:\n\t\t\timage = iM.input()\n\t\t\tscreen.blit(image, (iM.pos[0], iM.pos[1]))\n\t\tmyfont = pygame.font.SysFont(\"Times New Roman\", 22)\n\t\tlives = myfont.render(\"LIVES: \"+str(self.lives), 1, (51, 255, 51))\n\t\tpoints = myfont.render(\"POINTS: \"+str(self.points), 1, (51, 255, 51))\n\t\tseconds, minutes = (pygame.time.get_ticks()//1000), (pygame.time.get_ticks()//60000)\n\t\tprintTime = myfont.render(\"TIME: \" +str(minutes) + \":\" + str(seconds), 1, (102,178,255))\n\t\tif seconds < 10:\n\t\t\tprintTime = myfont.render(\"TIME: \" +str(minutes) + \":0\" + str(seconds), 1, (102,178,255))\n\t\telif seconds >= 60:\n\t\t\tseconds -= 60*(minutes)\n\t\t\tif seconds < 60:\n\t\t\t\tprintTime = myfont.render(\"TIME: \" +str(minutes) + \":\" + str(seconds), 1, (102,178,255))\n\t\t\tif seconds < 10:\n\t\t\t\tprintTime = myfont.render(\"TIME: \" +str(minutes) + \":0\" + str(seconds), 1, (102,178,255))\n\t\tself.minutes = minutes\n\t\tself.seconds = seconds\n\t\tscreen.blit(printTime, (250,20))\n\t\tscreen.blit(lives, (20,20))\n\t\tscreen.blit(points, (466,20))",
"def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow",
"def __init__(self, cam: DashboardCamera, window_shape=(80, 61), search_margin=200, max_frozen_dur=15):\n self.camera = cam\n\n # Create windows\n self.windows_left = []\n self.windows_right = []\n for level in range(cam.img_height // window_shape[0]):\n x_init_l = cam.img_width / 4\n x_init_r = cam.img_width / 4 * 3\n self.windows_left.append(Window(level, window_shape, cam.img_size, x_init_l, max_frozen_dur))\n self.windows_right.append(Window(level, window_shape, cam.img_size, x_init_r, max_frozen_dur))\n self.search_margin = search_margin\n\n # Initialize visuals\n VIZ_OPTIONS = ('dash_undistorted', 'overhead', 'lab_b', 'lab_b_binary', 'lightness', 'lightness_binary',\n 'value', 'value_binary', 'pixel_scores', 'windows_raw', 'windows_filtered', 'highlighted_lane',\n 'presentation')\n self.visuals = {name: None for name in VIZ_OPTIONS} # Storage location of visualization images\n self.__viz_desired = None # The visuals we want to save\n self.__viz_dependencies = {'windows_raw': ['pixel_scores'], # Dependencies of visuals on other visuals\n 'windows_filtered': ['pixel_scores'],\n 'presentation': ['highlighted_lane', 'overhead', 'windows_raw', 'windows_filtered',\n 'pixel_scores']}",
"def draw(self, win):\n img = self.tower_imgs\n win.blit(img, (self.x - img.get_width() // 2, self.y - img.get_height() // 2))\n\n if self.selected:\n self.menu.draw(win)",
"def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)",
"def draw(self, win, player, displayList, enemyHitboxList, mapWidth, mapHeight):\n cameraX = player.rect.left + (player.rect.width // 2) - (SCREEN_WIDTH // 2)\n cameraY = player.rect.top + (player.rect.height // 2) - (SCREEN_HEIGHT // 2)\n\n # On centre la camera tant que le joueurs n'atteind pas les bords\n if cameraX >= 0 and cameraX < mapWidth - SCREEN_WIDTH:\n self.x = cameraX\n\n if cameraY >= 0 and cameraY < mapHeight - SCREEN_HEIGHT:\n self.y = cameraY\n\n # Calcul de l'X du joueur en fonction s'il est en haut, bas ou entre les 2\n if cameraX >= 0 and cameraX < mapWidth - SCREEN_WIDTH:\n playerX = (SCREEN_WIDTH // 2) - (player.rect.width // 2)\n else:\n # Si le joueur est a droite\"\"\"\n if cameraX >= mapWidth - SCREEN_WIDTH:\n self.x = mapWidth - SCREEN_WIDTH\n playerX = player.rect.left - mapWidth + SCREEN_WIDTH\n # Si le joueur est a gauche\"\"\"\n else:\n self.x = 0\n playerX = player.rect.left\n\n\n # Calcul de l'Y du joueur en fonction s'il est a gauche, droite ou entre les 2\n if cameraY >= 0 and cameraY < mapHeight - SCREEN_HEIGHT:\n playerY = (SCREEN_HEIGHT // 2) - (player.rect.height // 2)\n else:\n # Si le joueur est en dessous\n if cameraY >= mapHeight - SCREEN_HEIGHT:\n self.y = mapHeight - SCREEN_HEIGHT\n playerY = player.rect.top - mapHeight + SCREEN_HEIGHT\n # Si le joueur est au dessus \n else:\n self.y = 0\n playerY = player.rect.top\n\n for element in displayList:\n element.draw(win,element.rect.left - self.x,element.rect.top - self.y)\n #for elem in enemyHitboxList:\n #pg.draw.rect(win, (200, 200, 200), pg.Rect(elem.left - self.x,elem.top - self.y, elem.width, elem.height))\n player.draw(win, playerX, playerY)",
"def draw_windows():\n martin.begin_fill() # lines 88-118 draw out a row consisting of 3 rectangles for windows\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n\n martin.forward(30)\n martin.begin_fill()\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n\n martin.forward(30)\n martin.begin_fill()\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n martin.hideturtle()",
"def draw_window_pane():\n houseturtle.begin_fill()\n for y in range(4):\n houseturtle.pendown()\n houseturtle.forward(35)\n houseturtle.left(90)\n houseturtle.penup()\n houseturtle.end_fill()",
"def _setwin(self, win):\n\t\tself.win = win",
"def draw(self, window):\n if self.selected:\n self.menu.draw(window) #Drawing menu\n window.blit(kill_count_table, (self.x + self.width // 2 - 15, self.y - self.height // 2 + 35))\n kills = self.font.render(str(self.kill_count) + \" Kills\", 1, (255, 255, 255))\n window.blit(kills, (self.x + self.width // 2 + 5, self.y - self.height // 2 + 43))\n\n tower_image = self.tower_images[self.level-1]\n\n if not self.level_up_animation: #Always draw the tower except when leveling up\n window.blit(tower_image, (self.x - tower_image.get_width() // 2, self.y - tower_image.get_height() // 2))\n\n else: #Leveling up animation procedure\n window.blit(self.level_up[self.level_animation // 2], (self.x - tower_image.get_width() - 75, self.y - 225))\n self.level_animation += 1\n if self.level_animation == len(level_up) * 2:\n self.level_up_animation = False\n self.level_animation = 0",
"def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])",
"def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)",
"def render(self, mode=\"human\", close=False):\n if close and self._viewer is None:\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n return\n\n screen_width = 600\n screen_height = 600\n if self._viewer is None:\n from gym.envs.classic_control import rendering\n self._viewer = rendering.Viewer(screen_width, screen_height)\n\n # generate the grid\n xs, self._xstep = np.linspace(\n 0, screen_width, self._width + 1, retstep=True)\n ys, self._ystep = np.linspace(\n 0, screen_height, self._height + 1, retstep=True)\n\n # render the grid\n for x in xrange(self._width):\n for y in xrange(self._height):\n l, r, t, b = (0, self._xstep, self._ystep, 0)\n tile = rendering.FilledPolygon([\n (l, b), (l, t), (r, t), (r, b)])\n tile.add_attr(rendering.Transform(translation=(\n x * self._xstep, y * self._ystep)))\n tile.set_color(*CASE_COLORS[chr(self._grid[x, y])])\n self._viewer.add_geom(tile)\n\n # render starting point\n l, r, t, b = (0, self._xstep, self._ystep, 0)\n tile = rendering.FilledPolygon([\n (l, b), (l, t), (r, t), (r, b)])\n tile.add_attr(rendering.Transform(translation=(\n self._trajectory[0][0] * self._xstep,\n self._trajectory[0][1] * self._ystep)))\n tile.set_color(0, 1.0, 1.0)\n self._viewer.add_geom(tile)\n\n # render grid lines\n for x in xs[1:len(xs) - 1]:\n # not including the first and last one\n line = rendering.Line((x, 0), (x, screen_height))\n self._viewer.add_geom(line)\n for y in ys[1: len(ys) - 1]:\n line = rendering.Line((0, y), (screen_width, y))\n self._viewer.add_geom(line)\n\n agent = rendering.make_circle(\n radius=min(\n screen_width / (self._width + 1) / 3,\n screen_height / (self._height + 1) / 3),\n res=30)\n self._agentTrans = rendering.Transform(translation=(\n self._currentPos[0] * self._xstep + (self._xstep / 2),\n self._currentPos[1] * self._ystep + (self._ystep / 2)))\n agent.add_attr(self._agentTrans)\n self._viewer.add_geom(agent)\n\n self._renderTrajectory()\n\n self._agentTrans.set_translation(\n self._currentPos[0] * self._xstep + (self._xstep / 2),\n self._currentPos[1] * self._ystep + (self._ystep / 2))\n\n self._viewer.render(return_rgb_array=(mode == 'rgb_array'))\n\n if close:\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n return",
"def drawmaze(self):\n win=GraphWin(\"Perfect Maze\",600,600) \n win.setBackground(\"White\")\n scale=600/self.N #Used to generalize the size difference for the input of larger numbers. The background resolution/ grid size, N\n\n x1=scale\n y1=0\n x2=scale\n y2=scale\n\n ##VERTICAL LINES ####\n for i in range(self.N,0,-1):\n for j in range(1,self.N):\n if self.East[j][i]: #If East is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2)) #lines | |\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale #Increment causes |->|\n x2+=scale #Increment causes |->|\n y1+=scale #Used to draw two more\n y2+=scale #of the same spaced lines further down.\n x1=scale #Reset\n x2=scale #Reset\n\n\n ##HORIZONTAL LINES##\n x1=0\n y1=scale\n x2=scale\n y2=scale\n\n\n for i in range(self.N,1,-1):\n for j in range(1,self.N+1):\n if self.South[j][i]: #If South is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2))\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale\n x2+=scale\n y1+=scale\n y2+=scale\n x1=0\n x2=scale\n\n const=scale//5 #Very useful const which helps in placing circles on grid.\n x=scale//2\n y=600-scale//2\n #radius=(scale-(4*scale//self.N))/2\n radius=scale//2-(const)\n start=Point(x,y) #START POINT HERE \n circ=Circle(start,radius)\n circ.setFill(\"Red\")\n label=Text(start,\"Start\")\n label.setFill(\"Black\")\n circ.draw(win)\n label.draw(win)\n #print(self.CurrentCell)\n #Using the current cell from the finished algorithm(last place visited), a circle can be placed at that point.\n endpointx=(self.CurrentCell[0]-1)*scale +scale//2 ####MAKING END POINT X\n endpointy=600-(self.CurrentCell[1]-1)*scale-scale//2 ####MAKING END POINT Y\n endpoint=Point(endpointx,endpointy)\n circ2=Circle(endpoint,radius)\n circ2.setFill(\"White\")\n label2=Text(endpoint,\"End\")\n circ2.draw(win)\n label2.draw(win)\n \n ###############CREATE KEY########################\n \n \n keypointx=(self.MazeKey[0]-1)*scale +scale//2 ####MAKING END POINT X\n keypointy=600-(self.MazeKey[1]-1)*scale-scale//2 ####MAKING END POINT Y\n keypoint=Point(keypointx,keypointy)\n circ3=Circle(keypoint,radius)\n circ3.setFill(\"Blue\")\n label3=Text(keypoint,\"Key\")\n circ3.draw(win)\n label3.draw(win)\n pathcol=\"Yellow\"\n##\n\n \n for i in range(1,len(self.EntirePath)): \n pathpointx=(self.EntirePath[i][0]-1)*scale +scale//2 ####MAKING END POINT X\n pathpointy=600-(self.EntirePath[i][1]-1)*scale-scale//2 ####MAKING END POINT Y\n pathpoint=Point(pathpointx,pathpointy)\n drawpath=Circle(pathpoint,radius)\n drawpath.setFill(pathcol)\n if self.EntirePath[i]==self.KeyPath[-1]:\n pathcol=\"Violet\"\n label4=Text(keypoint,\"Key\")\n label4.draw(win) \n drawpath.draw(win)\n drawpath.setWidth(1)\n sleep(0.1)\n \n #drawpath.draw(win)\n \n label5=Text(endpoint,\"Maze Solved \")\n label5.draw(win)\n circ4=Circle(start,radius)\n circ4.setFill(\"Red\")\n circ4.draw(win) \n label6=Text(start,\"Start \")\n label6.draw(win)",
"def draw(self, win):\n font = pygame.font.SysFont(\"comicsans\", 40)\n\n gap = self.width / 9\n x = self.col * gap\n y = self.row * gap\n\n if self.temp != 0 and self.value == 0:\n text = font.render(str(self.temp), 1, (128,128,128))\n win.blit(text, (x+5, y+5))\n elif not(self.value == 0):\n text = font.render(str(self.value), 1, (0, 0, 0))\n win.blit(text, (x + (gap/2 - text.get_width()/2), y + (gap/2 - text.get_height()/2)))\n\n if self.selected:\n pygame.draw.rect(win, (255,0,0), (x,y, gap ,gap), 3)",
"def _display_setup(self):\r\n display_file = \"{}/display.json\".format(self.settings_dir)\r\n with open(display_file) as json_file:\r\n win_settings = json.load(json_file)\r\n self.win = visual.Window(**win_settings)\r\n framerate = self.win.fps()\r\n self.frame_duration = 1.0/framerate\r\n self.mouse = event.Mouse(visible=False, win=self.win)",
"def show_plot(self):\n runs = self.GetParent().runs\n if len(runs) <= 0: return\n\n t1 = time.time()\n total_width = self.GetParent().total_width\n\n newwidth = total_width * (self.GetParent().zoom / 100)\n newmid = total_width * (self.GetParent().pan/100)\n newxmin = newmid - (newwidth/2)\n newxmax = newxmin + newwidth\n\n if newxmin < 0:\n newxmin = 0\n newxmax = newwidth\n elif newxmax > total_width:\n newxmax = total_width\n newxmin = newxmax - newwidth\n\n assert newxmin >= 0 and newxmin <= total_width\n\n #print \"**** Zoom: %s, pan: %s, total_width: %s, newwidth: %s, newmid: %s, newxmin: %s, newxmax: %s\" \\\n # %(self.GetParent().zoom,self.GetParent().pan,total_width,newwidth,newmid,newxmin,newxmax)\n\n left = 0\n width_so_far = 0\n self.figure.clear()\n braggsmax = max(flex.max(r.culled_braggs) for r in runs)\n braggsmin = min(flex.min(r.culled_braggs) for r in runs)\n distsmax = max(flex.max(r.culled_distances) for r in runs)\n distsmin = min(flex.min(r.culled_distances) for r in runs)\n sifomax = max(flex.max(r.culled_sifoils) for r in runs)\n sifomin = min(flex.min(r.culled_sifoils) for r in runs)\n wavemax = max(flex.max(r.culled_wavelengths) for r in runs)\n wavemin = min(flex.min(r.culled_wavelengths) for r in runs)\n\n #above tricks don't work for hit rates as they can be empty if the run is new\n goodruns = []\n for run in runs:\n if len(run.hit_rates) > 0: goodruns.append(run)\n if len(goodruns) > 0:\n hitsmax = max(flex.max(r.hit_rates) for r in goodruns)\n hitsmin = min(flex.min(r.hit_rates) for r in goodruns)\n else:\n hitsmax = hitsmin = 0\n\n first_run = True\n for run in runs:\n right = left + run.width()\n\n if right < newxmin or left > newxmax:\n left += run.width()\n #print \"Not showing run %s\"%run.runId\n continue\n\n if left < newxmin:\n xmin = run.min() + (newxmin - left)\n else:\n xmin = run.min()\n\n if right > newxmax:\n xmax = run.min() + (newxmax - left)\n else:\n xmax = run.max()\n\n #print \"Run: %s, run.width(): %s, left: %s, right: %s, run.min(): %s, run.max(): %s, xmin: %s, xmax: %s, width_so_far: %s, xmax-xmin: %s\" \\\n #%(run.runId,run.width(),left,right,run.min(),run.max(),xmin,xmax,width_so_far,xmax-xmin)\n\n ax1 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.05, 0.9*(xmax-xmin)/newwidth, 0.4])\n ax2 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.45, 0.9*(xmax-xmin)/newwidth, 0.2], sharex=ax1)\n ax3 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.65, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax4 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.75, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax5 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.85, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n left += run.width()\n width_so_far += (xmax-xmin)\n\n ax1.grid(True, color=\"0.75\")\n ax2.grid(True, color=\"0.75\")\n ax3.grid(True, color=\"0.75\")\n ax4.grid(True, color=\"0.75\")\n ax5.grid(True, color=\"0.75\")\n ax1.plot(run.culled_bragg_times.select(run.culled_indexed),\n run.culled_braggs.select(run.culled_indexed), 'd', color=[0.0,1.0,0.0])\n ax1.plot(run.culled_bragg_times.select(~run.culled_indexed),\n run.culled_braggs.select(~run.culled_indexed), 'd', color=[0.0,0.5,1.0])\n ax2.plot(run.hit_rates_times, run.hit_rates, 'o-', color=[0.0,1.0,0.0])\n ax3.plot(run.culled_bragg_times, run.culled_wavelengths, '^', color=[0.8,0.0,0.2])\n ax4.plot(run.culled_bragg_times, run.culled_sifoils, '<', color=[0.8,0.0,0.2])\n ax5.plot(run.culled_bragg_times, run.culled_distances, '>', 
color=[0.8,0.0,0.2])\n ax1.set_ylabel(\"# of Bragg spots\")\n ax2.set_ylabel(\"Hit rate (%)\")\n ax3.set_ylabel(\"WaveL\")\n ax4.set_ylabel(\"SiFoils(mm)\")\n ax5.set_ylabel(\"Dist (mm)\")\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(braggsmin, braggsmax)\n ax2.set_ylim(hitsmin, hitsmax)\n ax3.set_ylim(wavemin, wavemax)\n ax4.set_ylim(sifomin-10, sifomax+10)\n ax5.set_ylim(distsmin-3, distsmax+3)\n ax1.set_xlabel(\"Time\")\n for ax in ax1, ax2, ax3, ax4, ax5:\n if (ax is not ax1) :\n for label in ax.get_xticklabels():\n label.set_visible(False)\n ax.get_yticklabels()[0].set_visible(False)\n if not first_run:\n ax.get_yaxis().set_visible(False)\n\n ax1.xaxis.set_major_formatter(ticker.FuncFormatter(status_plot.format_time))\n ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.3f\"))\n ax5.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.0f\"))\n ax5.set_title(\"%d:%d/%d:%.1f%% I:%d\"%(run.runId, run.hits_count, len(run.braggs), 100*run.hits_count/len(run.braggs),run.indexed.count(True)))\n\n labels = ax1.get_xticklabels()\n for label in labels:\n label.set_rotation(30)\n\n first_run = False\n\n self.figure.autofmt_xdate()\n self.canvas.draw()\n self.parent.Refresh()\n\n t2 = time.time()\n print(\"Plotted in %.2fs\" % (t2 - t1))",
"def __draw_game(self) -> None:\n self.__draw_window()\n self.pipes.draw(self.win)\n self.player.draw(self.win)\n pygame.display.update()",
"def move(self, window):\r\n self.save_pos = (self.center_x, self.center_y) # sauvegarde la position avant de bouger\r\n self.center_x = math.cos(self.angle) * self.velocity + self.center_x\r\n self.center_y = math.sin(self.angle) * self.velocity + self.center_y\r\n self.rectangle = pygame.draw.circle(window, self.color, (self.center_x, self.center_y), self.radius) # update le rectangle\r"
] | [
"0.61466646",
"0.611178",
"0.60369253",
"0.59023196",
"0.58787185",
"0.58351535",
"0.5771538",
"0.5740812",
"0.5733873",
"0.5672222",
"0.5656955",
"0.5644705",
"0.563213",
"0.55671704",
"0.554327",
"0.54865324",
"0.54692745",
"0.54463905",
"0.5439828",
"0.5418802",
"0.53883034",
"0.5353524",
"0.5338825",
"0.5326757",
"0.5325126",
"0.5322348",
"0.5320941",
"0.5305704",
"0.52993083",
"0.52913314"
] | 0.61944795 | 0 |
Testing the eratosthenes function in task 559 | def test_task559_eratosthenes(number, expected_value):
assert algo.Task559.eratosthenes(number) == expected_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eratosthenes(x):\n multiples = []\n for i in range(2, x+1):\n if i not in multiples:\n print (i)\n for j in range(i*i, x+1, i):\n multiples.append(j)",
"def main():\n doctest.testmod()\n print(eratosthenes(2))",
"def eratosthenes2(n):\n multiples = set()\n for i in range(2, n+1):\n if i not in multiples:\n yield i\n multiples.update(range(i*i, n+1, i))",
"def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]",
"def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]",
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def gen_eratosthenes():\n n=3\n yield 2\n while True:\n count = 2 #set count to 2 because if count=1; all numbers are divisible by 1, so it is not a case we need to check\n this = True\n while count < n/2 + 1: #set to n/2 + 1 so that the amount of times iterated is minimized.\n if n%count == 0: #i.e. if n is divisble by count, then n is not prime\n count = n #ends this loop; if n is not prime, there is no reason to continue the loop\n this = False\n count += 1\n if this == True: #i.e. if this == True, then we know that the while loop was completely executed and n has no divisors except 1 and n\n yield n #yield n since it went through the entire loop without finding divisors\n n += 1 #increment n to see if n+1 is prime. will continue incrimenting until another prime is found and yields it",
"def seive_of_eratosthenes(n):\n sieve = [ True for i in range(n+1) ]\n def markOff(pv):\n for i in range(pv+pv, n+1, pv):\n sieve[i] = False\n markOff(2)\n for i in range(3, n+1):\n if sieve[i]:\n markOff(i)\n return [ i for i in range(2, n+1) if sieve[i] ]",
"def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]",
"def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]",
"def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])",
"def eratosthenes(n):\n assert n>1 #asserting n be a positive integer\n prime_list = []\n for i in range(2,n+1): #fills prime_list with all integers 2 <= i <= n\n prime_list.append(i)\n multiple = 2 #set to 2 because if set to 1 it will remove all elements from the list\n while multiple <= n/multiple:\n count = 2 #set to 2 because if set to 1 it will remove the prime itself from the list\n while count <= n/multiple:\n if count*multiple in prime_list: #checks if count*multiple is in list. needed because it could have already been removed\n prime_list.remove(count*multiple) #removes count*multiple\n count = count + 1\n multiple = multiple + 1\n #print(prime_list) #for testing only\n return prime_list",
"def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res",
"def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])",
"def es_primo(n):\n \n for i in range(2, n):\n if n % i == 0:\n return False\n return True",
"def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers",
"def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))",
"def test_prime_12(self):\n\t self.assertTrue(prime_generator(12), [2, 3, 5, 7, 11])",
"def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)",
"def main() -> int:\n\n a = None\n for n, g in enumerate(gen_primes(100000, 1000000)):\n repeat, indices = check_if_has_3_repeated_digits(str(g))\n if repeat:\n a = check_for_family_of_primes(repeat, indices, list(str(g)))\n if len(a) > 7 and min(a) > 100000:\n EULER_LOGGER.debug(f\"{a}\")\n a = min([int(i) for i in a])\n break\n\n return a",
"def test_rand_func2(self):\n for i in range(0, 100000):\n num = random.randint(0, 32535143990)\n func2_comp(num)",
"def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False",
"def esprimo(n):\n toret = False\n if x == 2:\n toret = True\n elif x % 2 == 0:\n toret = False\n else:\n for i in range(3, x, 2):\n if x % i == 0:\n break\n else:\n toret = True\n # Se ejecuta cuando no se rompe el bucle\n\n return toret",
"def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 5, 7])",
"def primfact(e):\n for n in range(2, e):\n for x in range(2, n):\n if n % x == 0:\n break\n else:\n print n,",
"def test_stress(self):\n primorial100 = 4711930799906184953162487834760260422020574773409675520188634839616415335845034221205289256705544681972439104097777157991804380284218315038719444943990492579030720635990538452312528339864352999310398481791730017201031090\n for i in range(10000):\n self.assertEqual(primorial(100), primorial100)",
"def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2",
"def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2",
"def rand_prime(k=10): \n i = random.randint(2**(k-2),2**(k-1))\n i,l=2*i+1,0\n while True:\n j = 3\n l +=1\n while i%j!=0:\n j += 1\n if i == j:\n return i\n #return i,len(bin(i)[2:]),l\n i += 2"
] | [
"0.7555729",
"0.7525924",
"0.7279653",
"0.7068233",
"0.6793754",
"0.6754159",
"0.6716757",
"0.66996837",
"0.66710955",
"0.657833",
"0.6571549",
"0.6510445",
"0.64366025",
"0.6392734",
"0.62452984",
"0.62227535",
"0.6184394",
"0.6181353",
"0.61811525",
"0.61747223",
"0.616319",
"0.6157256",
"0.6116748",
"0.60148513",
"0.60068923",
"0.5995163",
"0.5969272",
"0.5963861",
"0.5963861",
"0.5959063"
] | 0.7610841 | 0 |
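For reference, the positive pair above tests a sieve-of-Eratosthenes helper against expected prime lists. The following is a minimal, self-contained sketch of what such a helper and a parametrized test of the same shape could look like; the function name, its return format (a list of primes up to and including the bound), and the sample expected values are illustrative assumptions, not the dataset's actual algo.Task559 implementation.

import pytest

# Illustrative sieve of Eratosthenes: returns all primes up to and including `limit`.
# NOT the dataset's algo.Task559 code; name and return format are assumptions.
def eratosthenes(limit):
    if limit < 2:
        return []
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if is_prime[i]:
            # Mark every multiple of i, starting at i*i, as composite.
            for j in range(i * i, limit + 1, i):
                is_prime[j] = False
    return [n for n, prime in enumerate(is_prime) if prime]

# Parametrized test mirroring the shape of the positive document above.
@pytest.mark.parametrize("number, expected_value", [
    (1, []),
    (2, [2]),
    (10, [2, 3, 5, 7]),
])
def test_eratosthenes(number, expected_value):
    assert eratosthenes(number) == expected_value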
Testing the mersen_numbers function in task 559 | def test_task559_mersen_number(number, expected_value):
assert algo.Task559.mersen_numbers(number) == expected_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_numbers(number):\n print(\"\\nRunning test_numbers with {}\".format(number))",
"def test_numbers(self):\n \n result = gen_expansion(sym.pi, 2)\n self.assertEqual(result, '14')\n result = gen_expansion(sym.exp(1), 2)\n self.assertEqual(result, '72')",
"def test_anglicize100to999():\n print('Testing anglicize100to999')\n\n result = funcs.anglicize100to999(100)\n introcs.assert_equals(\"one hundred\", result)\n\n result = funcs.anglicize100to999(301)\n introcs.assert_equals(\"three hundred one\", result)\n\n result = funcs.anglicize100to999(999)\n introcs.assert_equals(\"nine hundred ninety nine\", result)",
"def getMantisse(number):\n mantisse = number / np.power(10, np.floor(np.log10(number)))\n return(mantisse)",
"def rabin_miller(n, target=128):\n ###############\n ## Start your code here\n return True\n ## End of your code\n ###############",
"def numerize():\n pass",
"def demo_a_number(random_number):",
"def test_inverse(self):\n from sosbeacon.utils import number_decode\n from sosbeacon.utils import number_encode\n\n for number in range(0, 500000, 339):\n encoded = number_encode(number)\n decoded = number_decode(encoded)\n self.assertEqual(number, decoded)",
"def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)",
"def run_suite():\n\n # test case 54\n twilio.run(\"+528451020032\", \"+12074264782\")\n # test case 55\n twilio.run(\"+528451020032\", \"+12074264782\")\n # test case 58\n twilio.run(\"+528451020031\", \"+12074264782\")\n # test case 59\n twilio.run(\"+528451020032\", \"+12074264781\")\n # test case 62\n twilio.run(\"+528451020032\", \"\")\n # test case 63\n twilio.run(\"\", \"+12074264782\")\n # test case 64\n twilio.run(\"\", \"\")\n # test case 65\n twilio.run(\"+528461043690\", \"+12074264782\")\n # test case 68\n twilio.run(\"+528451020032\", \"+12074264782\")\n # test case 69\n twilio.run(\"+528451020032\", \"+12074264782\")\n # test case 70\n twilio.run(\"+528451020032\", \"+12074264782\")\n # test case 75\n twilio.run(\"+524494934471\", \"+12074264782\")\n # test case 76\n twilio.run(\"+524494934471\", \"+12074264782\")\n # test case 79\n twilio.run(\"+524494934473\", \"+12074264782\")\n # test case 80\n twilio.run(\"+524494934471\", \"+12074264781\")\n # test case 83\n twilio.run(\"+524494934471\", \"\")\n # test case 84\n twilio.run(\"\", \"+12074264782\")\n # test case 85\n twilio.run(\"\", \"\")\n # test case 86\n twilio.run(\"+528451020323\", \"+12074264782\")\n # test case 89\n twilio.run(\"+524494934471\", \"+12074264782\")\n # test case 90\n twilio.run(\"+524494934471\", \"+12074264782\")\n # test case 91\n twilio.run(\"+524494934471\", \"+12074264782\")",
"def test_number_to_string(doctest):",
"def test_getnumber(self):\n convert = cnv()\n\n convert.setnum('einhundertdreiundzwanzig')\n self.assertEqual(convert.getnum(), 123)",
"def test_convert_word_with_numbers():\n for word in [\"1\", \"a1\", \"1a\"]:\n assert convert(word) == word",
"def thirteen():\r\n \r\n numbers = [37107287533902102798797998220837590246510135740250,\r\n 46376937677490009712648124896970078050417018260538,\r\n 74324986199524741059474233309513058123726617309629,\r\n 91942213363574161572522430563301811072406154908250,\r\n 23067588207539346171171980310421047513778063246676,\r\n 89261670696623633820136378418383684178734361726757,\r\n 28112879812849979408065481931592621691275889832738,\r\n 44274228917432520321923589422876796487670272189318,\r\n 47451445736001306439091167216856844588711603153276,\r\n 70386486105843025439939619828917593665686757934951,\r\n 62176457141856560629502157223196586755079324193331,\r\n 64906352462741904929101432445813822663347944758178,\r\n 92575867718337217661963751590579239728245598838407,\r\n 58203565325359399008402633568948830189458628227828,\r\n 80181199384826282014278194139940567587151170094390,\r\n 35398664372827112653829987240784473053190104293586,\r\n 86515506006295864861532075273371959191420517255829,\r\n 71693888707715466499115593487603532921714970056938,\r\n 54370070576826684624621495650076471787294438377604,\r\n 53282654108756828443191190634694037855217779295145,\r\n 36123272525000296071075082563815656710885258350721,\r\n 45876576172410976447339110607218265236877223636045,\r\n 17423706905851860660448207621209813287860733969412,\r\n 81142660418086830619328460811191061556940512689692,\r\n 51934325451728388641918047049293215058642563049483,\r\n 62467221648435076201727918039944693004732956340691,\r\n 15732444386908125794514089057706229429197107928209,\r\n 55037687525678773091862540744969844508330393682126,\r\n 18336384825330154686196124348767681297534375946515,\r\n 80386287592878490201521685554828717201219257766954,\r\n 78182833757993103614740356856449095527097864797581,\r\n 16726320100436897842553539920931837441497806860984,\r\n 48403098129077791799088218795327364475675590848030,\r\n 87086987551392711854517078544161852424320693150332,\r\n 59959406895756536782107074926966537676326235447210,\r\n 69793950679652694742597709739166693763042633987085,\r\n 41052684708299085211399427365734116182760315001271,\r\n 65378607361501080857009149939512557028198746004375,\r\n 35829035317434717326932123578154982629742552737307,\r\n 94953759765105305946966067683156574377167401875275,\r\n 88902802571733229619176668713819931811048770190271,\r\n 25267680276078003013678680992525463401061632866526,\r\n 36270218540497705585629946580636237993140746255962,\r\n 24074486908231174977792365466257246923322810917141,\r\n 91430288197103288597806669760892938638285025333403,\r\n 34413065578016127815921815005561868836468420090470,\r\n 23053081172816430487623791969842487255036638784583,\r\n 11487696932154902810424020138335124462181441773470,\r\n 63783299490636259666498587618221225225512486764533,\r\n 67720186971698544312419572409913959008952310058822,\r\n 95548255300263520781532296796249481641953868218774,\r\n 76085327132285723110424803456124867697064507995236,\r\n 37774242535411291684276865538926205024910326572967,\r\n 23701913275725675285653248258265463092207058596522,\r\n 29798860272258331913126375147341994889534765745501,\r\n 18495701454879288984856827726077713721403798879715,\r\n 38298203783031473527721580348144513491373226651381,\r\n 34829543829199918180278916522431027392251122869539,\r\n 40957953066405232632538044100059654939159879593635,\r\n 29746152185502371307642255121183693803580388584903,\r\n 41698116222072977186158236678424689157993532961922,\r\n 62467957194401269043877107275048102390895523597457,\r\n 
23189706772547915061505504953922979530901129967519,\r\n 86188088225875314529584099251203829009407770775672,\r\n 11306739708304724483816533873502340845647058077308,\r\n 82959174767140363198008187129011875491310547126581,\r\n 97623331044818386269515456334926366572897563400500,\r\n 42846280183517070527831839425882145521227251250327,\r\n 55121603546981200581762165212827652751691296897789,\r\n 32238195734329339946437501907836945765883352399886,\r\n 75506164965184775180738168837861091527357929701337,\r\n 62177842752192623401942399639168044983993173312731,\r\n 32924185707147349566916674687634660915035914677504,\r\n 99518671430235219628894890102423325116913619626622,\r\n 73267460800591547471830798392868535206946944540724,\r\n 76841822524674417161514036427982273348055556214818,\r\n 97142617910342598647204516893989422179826088076852,\r\n 87783646182799346313767754307809363333018982642090,\r\n 10848802521674670883215120185883543223812876952786,\r\n 71329612474782464538636993009049310363619763878039,\r\n 62184073572399794223406235393808339651327408011116,\r\n 66627891981488087797941876876144230030984490851411,\r\n 60661826293682836764744779239180335110989069790714,\r\n 85786944089552990653640447425576083659976645795096,\r\n 66024396409905389607120198219976047599490197230297,\r\n 64913982680032973156037120041377903785566085089252,\r\n 16730939319872750275468906903707539413042652315011,\r\n 94809377245048795150954100921645863754710598436791,\r\n 78639167021187492431995700641917969777599028300699,\r\n 15368713711936614952811305876380278410754449733078,\r\n 40789923115535562561142322423255033685442488917353,\r\n 44889911501440648020369068063960672322193204149535,\r\n 41503128880339536053299340368006977710650566631954,\r\n 81234880673210146739058568557934581403627822703280,\r\n 82616570773948327592232845941706525094512325230608,\r\n 22918802058777319719839450180888072429661980811197,\r\n 77158542502016545090413245809786882778948721859617,\r\n 72107838435069186155435662884062257473692284509516,\r\n 20849603980134001723930671666823555245252804609722,\r\n 53503534226472524250874054075591789781264330331690]\r\n \r\n sum = 0\r\n \r\n for n in numbers:\r\n sum += n\r\n \r\n return int(str(sum)[:10])",
"def test_setnumber(self):\n convert1 = cnv()\n\n convert1.setnum('einhundertdreiundzwanzig')\n self.assertEqual(convert1.numstring, 'einhundertdreiundzwanzig')",
"def test_big_numbers(self):\n arr, result = [535555555555, 5500000000000000000000000,\n 10, 15, 35, 0, -2, -67, -55], []\n fizz_buzz(arr, result)\n self.assertEqual(result, ['fizz', 'fizz', 'fizz', 'fizzbuzz',\n 'fizz', 'fizzbuzz', -2, -67, 'fizz'])",
"def getNumber():",
"def test_numbers(number):\n assert number ** 2 == number ** 2",
"def solve(number):\n if number == 0:\n return \"INSOMNIA\"\n else:\n total_digits = 10 # there are 10 digits [0-9]\n digits_seen = set()\n multiplier = 0\n while len(digits_seen) < total_digits:\n multiplier += 1\n digits_in_n = {int(i) for i in str(multiplier*number)}\n digits_seen = digits_seen.union(digits_in_n)\n return multiplier*number",
"def test_build_number(converted_tests):\n submission = SubmissionBuilder(\"t\", \"b\", converted_tests).build()\n assert submission.get(\"number\") == \"b\", submission",
"def test_anglicize1000():\n print('Testing anglicize1000')\n\n result = funcs.anglicize1000(1)\n introcs.assert_equals(\"one\", result)\n\n result = funcs.anglicize1000(19)\n introcs.assert_equals(\"nineteen\", result)\n\n result = funcs.anglicize1000(20)\n introcs.assert_equals(\"twenty\", result)\n\n result = funcs.anglicize1000(35)\n introcs.assert_equals(\"thirty five\", result)\n\n result = funcs.anglicize1000(50)\n introcs.assert_equals(\"fifty\", result)\n\n result = funcs.anglicize1000(99)\n introcs.assert_equals(\"ninety nine\", result)\n\n result = funcs.anglicize1000(100)\n introcs.assert_equals(\"one hundred\", result)\n\n result = funcs.anglicize1000(301)\n introcs.assert_equals(\"three hundred one\", result)\n\n result = funcs.anglicize1000(999)\n introcs.assert_equals(\"nine hundred ninety nine\", result)",
"def numbers2words():\n\tmy_num = None\n\twhile my_num != \"0\":\n\t\tmy_num = input(\"Please enter a number greater than 0 and less than 1 trillion: \")\n\t\tprint(name_num(int(my_num.replace(\",\",\"\"))))",
"def test_add_numbers(self):\n self.assertEqual(addNums(3, 8), 11)",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def multiply_numbers(first_number, second_number):",
"def test_ok_mm_primer(self):\r\n primers = ['AAAA', 'GGGG']\r\n self.assertEqual(ok_mm_primer('AAAA', primers, 0), True)\r\n self.assertEqual(ok_mm_primer('AAAA', primers, 3), True)\r\n self.assertEqual(ok_mm_primer('CCCC', primers, 0), False)\r\n self.assertEqual(ok_mm_primer('CCCA', primers, 3), True)\r\n self.assertEqual(ok_mm_primer('CCCA', primers, 2), False)\r\n self.assertEqual(ok_mm_primer('CCGG', primers, 2), True)\r\n self.assertEqual(ok_mm_primer('CCGA', primers, 2), False)",
"def test_converter_number_system(self):\n \n input_values = [0,0,2,'97']\n\n output = []\n expected_result = \"Result: 97 Dec equals to 61 Hex\"\n\n def mock_input(s):\n output.append(s)\n return input_values.pop(0)\n\n mp2.input = mock_input\n mp2.print = lambda s:output.append(s)\n mp2.main()\n self.assertEqual(output[-1],expected_result)",
"def test_number(self):\n\n tokens = list(Lexer(\"123 123.456 .456 .123 .\").generate_tokens())\n answer = [Token(TokenType.NUMBER, 123),\n Token(TokenType.NUMBER, 123.456),\n Token(TokenType.NUMBER, 0.456),\n Token(TokenType.NUMBER, 0.123),\n Token(TokenType.NUMBER, 0.0)]\n self.assertEqual(tokens, answer)",
"def evaluate_number(number : int)->int:\n if type(number) == int and number >1 and number < 100:\n num = total_numbers = porc = 0\n while porc < number:\n num = num + 1\n clasificate = is_bouncy(str(num))\n result = evaluate(clasificate , num)\n if result:\n total_numbers = total_numbers + 1\n porc = total_numbers * 100 / num\n return num\n return 0"
] | [
"0.5994305",
"0.58658266",
"0.5729014",
"0.56989163",
"0.5615644",
"0.5593863",
"0.5531031",
"0.55126804",
"0.54742104",
"0.5439089",
"0.543804",
"0.54284734",
"0.54218674",
"0.54073083",
"0.5352614",
"0.5331817",
"0.53211087",
"0.5284186",
"0.5279782",
"0.52637136",
"0.5258012",
"0.5257852",
"0.52443284",
"0.52387244",
"0.52387244",
"0.5237288",
"0.52255994",
"0.5213397",
"0.5212256",
"0.5209041"
] | 0.7466113 | 0 |
treq should be lazy imported since importing treq will install reactor. twisted.web.client.HTTPConnectionPool is patched here too. | def get_treq():
    patch_twisted_http_connection_pool_bug()
    import treq
    return treq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fake_twisted_request(*args, **kwargs):\n kwargs.setdefault(\n 'Request', lambda channel: Request(channel=channel, queued=False))\n request = fake_nevow_request(*args, **kwargs)\n request.finish = lambda: next(request.finish.counter)\n request.finish.counter = itertools.count()\n return request",
"def setUp(self):\n self.reactor = self.Reactor()\n self.url = 'https://www.example.com/someresource.html#andatag'",
"def fakehttp(solrconn, *fakedata):\n\n class FakeOutput(list):\n\n \"\"\"helper class to organize output from fake connections\"\"\"\n\n conn = solrconn\n\n def log(self, item):\n self.current.append(item)\n\n def get(self, skip=0):\n self[:] = self[skip:]\n return b\"\".join(self.pop(0)).replace(b\"\\r\", b\"\")\n\n def new(self):\n self.current = []\n self.append(self.current)\n\n def __len__(self):\n self.conn.flush() # send out all pending xml\n return super(FakeOutput, self).__len__()\n\n def __str__(self):\n self.conn.flush() # send out all pending xml\n if self:\n return \"\".join([chunk.decode(\"utf-8\") for chunk in self[0]]).replace(\n \"\\r\", \"\"\n )\n else:\n return \"\"\n\n output = FakeOutput()\n\n class FakeSocket(six.BytesIO):\n\n \"\"\"helper class to fake socket communication\"\"\"\n\n def sendall(self, str):\n output.log(str)\n\n if six.PY2:\n\n def makefile(self, mode, name):\n return self\n\n else:\n\n def makefile(self, mode):\n return self\n\n def read(self, amt=None):\n if self.closed:\n return b\"\"\n return six.BytesIO.read(self, amt)\n\n def readline(self, length=None):\n if self.closed:\n return b\"\"\n return six.BytesIO.readline(self, length)\n\n class FakeHTTPConnection(HTTPConnection):\n\n \"\"\"helper class to fake a http connection object from httplib.py\"\"\"\n\n def __init__(self, host, *fakedata):\n HTTPConnection.__init__(self, host)\n self.fakedata = list(fakedata)\n\n def putrequest(self, *args, **kw):\n self.url = args[1]\n response = self.fakedata.pop(0) # get first response\n self.sock = FakeSocket(response) # and set up a fake socket\n output.new() # as well as an output buffer\n HTTPConnection.putrequest(self, *args, **kw)\n\n def setTimeout(self, timeout):\n pass\n\n solrconn.conn = FakeHTTPConnection(solrconn.conn.host, *fakedata)\n return output",
"def __http_request_maker(\n req_type,\n url,\n headers,\n retries,\n time_sleep,\n timeout_sec=None,\n data=None,\n content_type=None,\n socks_proxy=None,\n):\n if socks_proxy is not None:\n socks_version = (\n socks.SOCKS5\n if socks_proxy.startswith(\"socks5://\")\n else socks.SOCKS4\n )\n socks_proxy = socks_proxy.rsplit(\"://\")[1]\n if \"@\" in socks_proxy:\n socks_username = socks_proxy.rsplit(\":\")[0]\n socks_password = socks_proxy.rsplit(\":\")[1].rsplit(\"@\")[0]\n socks.set_default_proxy(\n socks_version,\n str(socks_proxy.rsplit(\"@\")[1].rsplit(\":\")[0]),\n int(socks_proxy.rsplit(\":\")[-1]),\n username=socks_username,\n password=socks_password,\n )\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n else:\n socks.set_default_proxy(\n socks_version,\n str(socks_proxy.rsplit(\":\")[0]),\n int(socks_proxy.rsplit(\":\")[1]),\n )\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n exits = 0\n r = None\n while True:\n try:\n req_type = req_type.lower()\n if req_type in [\"post\", \"put\", \"patch\"]:\n if content_type == \"application/data\":\n r = eval(\n \"requests.{}(url=url, headers=headers, data=data,\\\n timeout=timeout_sec, verify=False)\".format(\n req_type\n )\n )\n elif content_type == \"application/json\":\n r = eval(\n \"requests.{}(url=url, headers=headers, json=data,\\\n timeout=timeout_sec, verify=False)\".format(\n req_type\n )\n )\n elif req_type in [\"get\", \"head\", \"delete\"]:\n r = eval(\n \"requests.{}(url=url, headers=headers,\\\n verify=False, timeout=timeout_sec)\".format(\n req_type\n )\n )\n break\n except Exception as _:\n exits += 1\n if exits is retries:\n return 0\n else:\n time.sleep(time_sleep)\n continue\n return r",
"def test_issue_50():\n utils.set_http_mock()\n\n client = Github(proxy_host=\"my.proxy.com\", proxy_port=9000)\n setup_args = client.request._http.called_with\n assert_equals(type(setup_args['proxy_info']), httplib2.ProxyInfo)\n assert_equals(setup_args['proxy_info'].proxy_host, 'my.proxy.com')\n assert_equals(setup_args['proxy_info'].proxy_port, 9000)\n\n utils.unset_http_mock()",
"def __init__(self, server, conn):\n super(CPWSGIHTTPRequest, self).__init__(\n server, conn, proxy_mode=True\n )",
"def patch_http_connection_pool(**constructor_kwargs):\n class MyHTTPConnectionPool(connectionpool.HTTPConnectionPool):\n def __init__(self, *args, **kwargs):\n kwargs.update(constructor_kwargs)\n super(MyHTTPConnectionPool, self).__init__(*args, **kwargs)\n poolmanager.pool_classes_by_scheme['http'] = MyHTTPConnectionPool",
"def use_twisted(app):\n activity.EventLoop <<= activity.TwistedEventLoop\n REACTOR_INIT.notify(app)",
"def test_get_response_with_retry__connection_reset(self, mock_get_thread_session):\n\n mock_requests_response = mock.Mock(status_code=206)\n mock_requests_session = mock.create_autospec(requests.Session)\n mock_requests_session.get.side_effect = [\n ConnectionResetError(),\n mock_requests_response,\n ]\n mock_get_thread_session.return_value = mock_requests_session\n\n mock_presigned_url_provider = mock.create_autospec(\n download_threads.PresignedUrlProvider\n )\n presigned_url_info = download_threads.PresignedUrlInfo(\n \"foo.txt\", \"synapse.org/foo.txt\", datetime.datetime.utcnow()\n )\n\n mock_presigned_url_provider.get_info.return_value = presigned_url_info\n start = 5\n end = 42\n\n mock_syn = mock.Mock(spec=Synapse)\n mock_executor = mock.Mock(spec=concurrent.futures.Executor)\n downloader = _MultithreadedDownloader(mock_syn, mock_executor, 5)\n assert (start, mock_requests_response) == downloader._get_response_with_retry(\n mock_presigned_url_provider, start, end\n )\n\n expected_get_call_args_list = [\n mock.call(presigned_url_info.url, headers={\"Range\": \"bytes=5-42\"})\n ] * 2\n assert mock_requests_session.get.call_args_list == expected_get_call_args_list",
"def test_client_can_load_client_page_requests_directly(self):\n\n req = self.httpbin_3.get_request_data('get_my_ip')\n\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['get_my_ip'])\n req = self.httpbin_3.get_request_data('test_requests_patch_method')\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['test_requests_patch_method'])\n req = self.httpbin_3.get_request_data('test_requests_delete_method')\n self.assertEqual(req, self.httpbin_3.client[\"second_page\"]['test_requests_delete_method'])\n\n req = self.httpbin_4.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_4.client['get_my_ip'])\n req = self.httpbin_4.get_request_data('get_user_my_agent')\n self.assertEqual(req, self.httpbin_4.client['get_user_my_agent'])\n req = self.httpbin_4.get_request_data('test_requests_put_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_put_method'])\n req = self.httpbin_4.get_request_data('test_requests_post_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_post_method'])",
"def test_client_can_load_client_requests_directly(self):\n\n req = self.httpbin.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin.client['get_my_ip'])\n req = self.httpbin.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin.client['get_my_headers'])\n\n req = self.httpbin_2.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_2.client['get_my_ip'])\n req = self.httpbin_2.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin_2.client['get_my_headers'])",
"def setup_client():\n webtest.WebCase.PORT = cherrypy.server.socket_port\n webtest.WebCase.HOST = cherrypy.server.socket_host\n if cherrypy.server.ssl_certificate:\n CPWebCase.scheme = 'https'",
"def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200",
"def req():\n return Request()",
"def test_reuse():\n class Handler(RequestHandler):\n def get(self):\n self.write('Hello')\n\n app = Application([url('/hello', Handler)])\n\n tester = Tester(app)\n with tester:\n response = yield tester.http_client.fetch(tester.url_for('/hello'))\n assert 'Hello' == text_body(response)\n\n with pytest.raises(RuntimeError):\n tester.setup()",
"def patch():\n\n config(\"127.0.0.1\", 9050)\n\n socket.socket = socks.socksocket\n socket.create_connection = create_connection",
"def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF",
"def tls_http_server(request):\n return functools.partial(make_tls_http_server, request=request)",
"def __init__(self, request_timeout=3, max_concurrency=3, backlog=16, debug=False):\n self.loop = asyncio.get_event_loop()\n self.request_timeout = request_timeout\n self.max_concurrency = max_concurrency\n self.backlog = backlog\n self.debug = debug\n self.explicit_url_map = {}\n self.catch_all_handler = None\n self.parameterized_url_map = {}\n # Currently opened connections\n self.conns = {}\n # Statistics\n self.processed_connections = 0",
"def stub_http(hass):\n mock_http_component(hass)",
"def test_client_twrr_performance(self):\n pass",
"def init():\n # make sure pool is initialized\n global pool\n if not pool:\n pool = aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(limit=config.MAX_PARALLEL_REQUESTS),\n raise_for_status=False,\n trust_env=True,\n auth=aiohttp.BasicAuth( config.CACHE_USERNAME, config.CACHE_PASSWORD ),\n )",
"def _getClientConnection(self):\n self.client = twisted_client.DivvyClient(self.host, self.port, timeout=1.0)\n return self.client.connection.deferred",
"def _set_requestor(self, pool_options):\n # We had been importing this at the top of the module, but that seemed\n # to break some CI environments\n import requests\n\n if not pool_options['enable']:\n self._requestor = requests\n return\n\n session = requests.Session()\n adapter = requests.adapters.HTTPAdapter(\n pool_block=pool_options['block'],\n pool_connections=pool_options['number'],\n pool_maxsize=pool_options['maxsize'],\n )\n logger.info(\n 'Created connection pool (block={}, number={}, maxsize={})'.format(\n pool_options['block'],\n pool_options['number'],\n pool_options['maxsize']))\n\n prefix = _get_protocol_prefix(self.api_root)\n if prefix:\n session.mount(prefix, adapter)\n logger.info('Mounted connection pool for \"{}\"'.format(prefix))\n else:\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n logger.info(\n 'Could not find protocol prefix in API root, mounted '\n 'connection pool on both http and https.')\n\n self._requestor = session",
"async def test_get_chunked_response_and_not_read_it(app, aiohttp_server):\n server = await aiohttp_server(app)\n url = \"http://localhost:%d/chunked\" % server.port\n\n async with aiosonic.HTTPClient() as client:\n res = await client.get(url)\n assert client.connector.pool.free_conns(), 24\n del res\n assert client.connector.pool.free_conns(), 25\n\n connector = aiosonic.TCPConnector(pool_cls=CyclicQueuePool)\n async with aiosonic.HTTPClient(connector) as client:\n res = await client.get(url)\n assert client.connector.pool.free_conns(), 24\n del res\n assert client.connector.pool.free_conns(), 25\n await server.close()",
"def __init__(self):\n\n # Every WSGI application must have an application object - a callable\n # object that accepts two arguments. For that purpose, we're going to\n # use a function (note that you're not limited to a function, you can\n # use a class for example). The first argument passed to the function\n # is a dictionary containing CGI-style environment variables and the\n # second variable is the callable object (see PEP 333).\n def application(environ, start_response):\n \"\"\"\n WSGI application object. Returns request status.\n For specific endpoints (e.g. get_with_params), returns\n specific response bodies.\n \"\"\"\n\n response_text = 'Hello World!'\n endpoint = environ['PATH_INFO'][1:]\n\n if endpoint == 'get_with_params':\n #echo back uri parameters as dict...\n response_text = str(dict(urlparse.parse_qsl(environ['QUERY_STRING'])))\n\n #set status code for response based on request...\n requested_status = environ['PATH_INFO'][1:]\n\n status = self.codes.get(requested_status, '200 OK') # HTTP Status\n headers = [('Content-type', 'text/plain')] # HTTP Headers\n start_response(status, headers)\n #print(environ)\n #print('pathInfo: {0}'.format(environ.get('PATH_INFO')))\n #print('queryString: {0}'.format(environ.get('QUERY_STRING')))\n #print('requestMethod:{0}'.format(environ['REQUEST_METHOD']))\n # The returned object is going to be printed\n return response_text\n\n threading.Thread.__init__(self)\n self.httpd = make_server('', 0, application)\n self.address = self.httpd.server_address",
"def test_deprecated(self):\n client.ThreadedResolver()\n warnings = self.flushWarnings(offendingFunctions=[self.test_deprecated])\n self.assertEquals(\n warnings[0]['message'],\n \"twisted.names.client.ThreadedResolver is deprecated since \"\n \"Twisted 9.0, use twisted.internet.base.ThreadedResolver \"\n \"instead.\")\n self.assertEquals(warnings[0]['category'], DeprecationWarning)\n self.assertEquals(len(warnings), 1)",
"async def test_pool_acquire_timeout(app, aiohttp_server, mocker):\n server = await aiohttp_server(app)\n url = \"http://localhost:%d/slow_request\" % server.port\n\n connector = TCPConnector(pool_size=1, timeouts=Timeouts(pool_acquire=0.3))\n async with aiosonic.HTTPClient(connector) as client:\n with pytest.raises(ConnectionPoolAcquireTimeout):\n await asyncio.gather(\n client.get(url),\n client.get(url),\n )\n await server.close()",
"def setUp(self):\r\n super(SSLClientTest, self).setUp()\r\n self.client = Client()\r\n self.factory = RequestFactory()\r\n self.mock = Mock()",
"def __init__( self, site, debug=False, encoding=None, guess_encoding=False, requests_before_reconnect=0, proxy_must_match=None, print_requests=True):\n\t\tobject.__init__(self)\n\t\tself.debug = debug\n\t\tself.encoding = encoding\n\t\tself.guess_encoding = guess_encoding\n\t\tself.proxy_must_match = proxy_must_match # regular expression\n\t\tself.__proxy = None\n\t\t\n\t\tself.add_referer = False\n\t\tself.redirect_automatically = True\n\t\t\n\t\tself.print_requests = print_requests\n\t\t\n\t\tif requests_before_reconnect > 0:\n\t\t\tself.requests_before_reconnect = requests_before_reconnect\n\t\t\tself.requests_count = 1\n\t\telse:\n\t\t\tself.requests_before_reconnect = -1\n\t\t\n\t\tself.headers = {\n\t\t\t\"User-Agent\" : \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)\",\n\t\t}\n\t\t\n\t\tself.https = None\n\t\tself.http = None\n\t\t\n\t\t# pick_a_new_proxy_ip needs to access self.site to create HTTPConnect object\n\t\t# then setup_browser_for_site needs to set up properly\n\t\tself.__site = site\n\t\tself.pick_a_new_proxy_ip()\n\t\tself.setup_browser_for_site(site)"
] | [
"0.5803267",
"0.5696364",
"0.565525",
"0.554367",
"0.5538993",
"0.5526741",
"0.55221415",
"0.54777414",
"0.54584825",
"0.54484373",
"0.5435534",
"0.5435227",
"0.5414661",
"0.5396609",
"0.53723156",
"0.53629166",
"0.53303",
"0.5325433",
"0.5303499",
"0.5299356",
"0.5226295",
"0.52241343",
"0.5220322",
"0.5195203",
"0.5185376",
"0.5177418",
"0.51737916",
"0.5168047",
"0.51588756",
"0.5147947"
] | 0.81258976 | 0 |
Given an instance retrieve the expected test configurations for instance's datastore. | def expected_instance_datastore_configs(instance_id):
    instance = instance_info.dbaas.instances.get(instance_id)
    datastore_type = instance.datastore['type']
    datastore_test_configs = CONFIG.get(datastore_type, {})
    return datastore_test_configs.get("configurations", {}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_configuration_details_from_instance_validation(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n inst = instance_info.dbaas.instances.get(instance_info.id)\n configuration_id = inst.configuration['id']\n print(\"configuration_info: %s\" % configuration_id)\n assert_not_equal(None, configuration_id)\n _test_configuration_is_applied_to_instance(instance_info,\n configuration_id)",
"def test_get_configuration_details_from_instance_validation(self):\n inst = instance_info.dbaas.instances.get(configuration_instance.id)\n configuration_id = inst.configuration['id']\n assert_not_equal(None, configuration_id)\n _test_configuration_is_applied_to_instance(configuration_instance,\n configuration_id)",
"def test_configurations_get(self):\n result = instance_info.dbaas.configurations.get(configuration_info.id)\n assert_equal(configuration_info.id, result.id)\n assert_equal(configuration_info.name, result.name)\n assert_equal(configuration_info.description, result.description)\n\n # check the result field types\n with TypeCheck(\"configuration\", result) as check:\n check.has_field(\"id\", str)\n check.has_field(\"name\", str)\n check.has_field(\"description\", str)\n check.has_field(\"values\", dict)\n check.has_field(\"created\", str)\n check.has_field(\"updated\", str)\n check.has_field(\"instance_count\", int)\n\n print(result.values)\n\n # check for valid timestamps\n assert_true(_is_valid_timestamp(result.created))\n assert_true(_is_valid_timestamp(result.updated))\n\n # check that created and updated timestamps differ, since\n # test_appending_to_existing_configuration should have changed the\n # updated timestamp\n if not CONFIG.fake_mode:\n assert_not_equal(result.created, result.updated)\n\n assert_equal(result.instance_count, 1)\n\n with CollectionCheck(\"configuration_values\", result.values) as check:\n # check each item has the correct type according to the rules\n for (item_key, item_val) in result.values.items():\n print(\"item_key: %s\" % item_key)\n print(\"item_val: %s\" % item_val)\n dbaas = instance_info.dbaas\n param = dbaas.configuration_parameters.get_parameter(\n instance_info.dbaas_datastore,\n instance_info.dbaas_datastore_version,\n item_key)\n if param.type == 'integer':\n check.has_element(item_key, int)\n if param.type == 'string':\n check.has_element(item_key, str)\n if param.type == 'boolean':\n check.has_element(item_key, bool)\n\n # Test to make sure that another user is not able to GET this config\n reqs = Requirements(is_admin=False)\n test_auth_user = instance_info.user.auth_user\n other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user])\n other_user_tenant_id = other_user.tenant_id\n client_tenant_id = instance_info.user.tenant_id\n if other_user_tenant_id == client_tenant_id:\n other_user = CONFIG.users.find_user(\n reqs, black_list=[instance_info.user.auth_user,\n other_user])\n print(other_user)\n print(other_user.__dict__)\n other_client = create_dbaas_client(other_user)\n assert_raises(exceptions.NotFound, other_client.configurations.get,\n configuration_info.id)",
"def expected_default_datastore_configs():\n default_datastore = CONFIG.get('dbaas_datastore', None)\n datastore_test_configs = CONFIG.get(default_datastore, {})\n return datastore_test_configs.get(\"configurations\", {})",
"def test_start_instance_with_configuration(self):\n global configuration_instance\n databases = []\n databases.append({\"name\": \"firstdbconfig\", \"character_set\": \"latin2\",\n \"collate\": \"latin2_general_ci\"})\n databases.append({\"name\": \"db2\"})\n configuration_instance.databases = databases\n users = []\n users.append({\"name\": \"liteconf\", \"password\": \"liteconfpass\",\n \"databases\": [{\"name\": \"firstdbconfig\"}]})\n configuration_instance.users = users\n configuration_instance.name = \"TEST_\" + str(uuid.uuid4()) + \"_config\"\n flavor_href = instance_info.dbaas_flavor_href\n configuration_instance.dbaas_flavor_href = flavor_href\n configuration_instance.volume = instance_info.volume\n configuration_instance.dbaas_datastore = instance_info.dbaas_datastore\n configuration_instance.dbaas_datastore_version = \\\n instance_info.dbaas_datastore_version\n configuration_instance.nics = instance_info.nics\n\n result = instance_info.dbaas.instances.create(\n configuration_instance.name,\n configuration_instance.dbaas_flavor_href,\n configuration_instance.volume,\n configuration_instance.databases,\n configuration_instance.users,\n nics=configuration_instance.nics,\n availability_zone=\"nova\",\n datastore=configuration_instance.dbaas_datastore,\n datastore_version=configuration_instance.dbaas_datastore_version,\n configuration=configuration_href)\n assert_equal(200, instance_info.dbaas.last_http_code)\n assert_equal(\"BUILD\", result.status)\n configuration_instance.id = result.id",
"def test_instance_api(self):\n\n # Test creating a db instance.\n # ----------------------------\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"%s\",\n \"flavorRef\": \"103\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\" % INSTANCE_NAME\n\n client = httplib2.Http(\".cache\", timeout=TIMEOUTS['http'], disable_ssl_certificate_validation=True)\n resp, content = self._execute_request(client, \"instances\", \"POST\", body)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status, (\"Expecting 201 as response status of create instance but received %s\" % resp.status))\n content = self._load_json(content,'Create Instance')\n self.assertTrue(content.has_key('instance'), \"Response body of create instance does not have 'instance' field\")\n\n credential = content['instance']['credential']\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n\n # Test listing all db instances.\n # ------------------------------\n LOG.info(\"* Listing all db instances\")\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the response is\n # in the expected format (e.g. a JSON object beginning with an\n # 'instances' key).\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of list instance but received %s\" % resp.status))\n content = self._load_json(content,'List all Instances')\n self.assertTrue(content.has_key('instances'), \"Response body of list instances does not contain 'instances' field.\")\n\n\n # Test getting a specific db instance.\n # ------------------------------------\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the returned\n # instance is the same as the accepted instance.\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n self.assertEqual(self.instance_id, str(content['instance']['id']), \"Instance ID not found in Show Instance response\")\n\n\n # Check to see if the instance we previously created is \n # in the 'running' state\n # -----------------------------------------------------\n wait_so_far = 0\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n while status != 'running' or pub_ip is None or len(pub_ip) <= 0:\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n\n if status != 'running':\n\n self.fail(\"for some reason the instance did not switch to 'running' in %s\" % TIMEOUT_STR)\n else:\n # try to connect to mysql instance\n pub_ip = content['instance']['hostname']\n # user/pass = credentials\n db_user = credential['username']\n 
db_passwd = credential['password']\n db_name = 'mysql'\n\n LOG.info(\"* Trying to connect to mysql DB on first boot: %s, %s, %s\" %(db_user, db_passwd, pub_ip))\n conn = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed on first boot over %s: \" % pub_ip)\n conn.close()\n\n\n\n # Test resetting the password on a db instance.\n # ---------------------------------------------\n LOG.info(\"* Resetting password on instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/resetpassword\", \"POST\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of reset password but received %s\" % resp.status))\n content = self._load_json(content,'Get new password')\n\n if resp.status == 200 :\n db_new_passwd = content['password']\n LOG.info(\"* Trying to connect to mysql DB after resetting password: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n LOG.exception(\"* something is wrong with mysql connection after resetting password\")\n conn.close()\n LOG.info(\"* Maybe the old password still works ?\")\n conn_2 = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn_2 is None:\n LOG.exception(\"* no, old password does not work anymore\")\n else:\n LOG.info(\"* old password still works, new password has not kicked in\")\n conn_2.close()\n self.fail(\"* maximum trials reached, db connection failed after resetting password over %s: \" % pub_ip)\n\n\n # XXX: Suspect restarting too soon after a \"reset password\" command is putting the instance in a bad mood on restart\n time.sleep(DELAYS['between_reset_and_restart'])\n\n # Test restarting a db instance.\n # ------------------------------\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/restart\", \"POST\", \"\")\n self.assertEqual(204, resp.status, (\"Expecting 204 as response status of restart instance but received %s\" % resp.status))\n\n # Test getting a specific db instance.\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance after Restart')\n \n wait_so_far = 0\n status = content['instance']['status']\n while status != 'running':\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n\n if status != 'running':\n self.fail(\"Instance %s did not go to running after a reboot and waiting %s\" % (self.instance_id, TIMEOUT_STR))\n else:\n # try to connect to mysql instance\n time.sleep(DELAYS['between_reboot_and_connect'])\n LOG.info(\"* Trying to connect to mysql DB after rebooting the instance: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n\n conn = 
self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed after rebooting instance over %s: \" % pub_ip)\n conn.close()\n\n # Test deleting a db instance.\n # ----------------------------\n LOG.info(\"* Deleting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"DELETE\", \"\")\n\n # Assert 1) that the request was accepted and 2) that the instance has\n # been deleted.\n self.assertEqual(204, resp.status, \"Response status of instance delete did not return 204\")\n\n LOG.debug(\"Verifying that instance %s has been deleted\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n if not content:\n pass\n else:\n content = json.loads(content)\n for each in content['instances']:\n self.assertFalse(each['id'] == self.instance_id, (\"Instance %s did not actually get deleted\" % self.instance_id))\n\n LOG.debug(\"Sleeping...\")\n time.sleep(DELAYS['after_delete'])",
"def test_valid_configurations_create(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('valid_values'))\n expected_values = json.loads(values)\n result = instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC,\n datastore=instance_info.dbaas_datastore,\n datastore_version=instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 200)\n with TypeCheck('Configuration', result) as configuration:\n configuration.has_field('name', str)\n configuration.has_field('description', str)\n configuration.has_field('values', dict)\n configuration.has_field('datastore_name', str)\n configuration.has_field('datastore_version_id', str)\n configuration.has_field('datastore_version_name', str)\n global configuration_info\n configuration_info = result\n assert_equal(configuration_info.name, CONFIG_NAME)\n assert_equal(configuration_info.description, CONFIG_DESC)\n assert_equal(configuration_info.values, expected_values)",
"def get_test_settings():\n from youtube_podcast_api.config import Settings\n settings = Settings()\n settings.db_path = \"./sql_test.db\"\n return settings",
"def test_all_configs_values():\n\n app_configs = application_services.get_configs()\n\n assert app_configs['TITLE'] == 'pyrin_unit_tests'\n assert app_configs['ENCODING'] == 'utf-8'\n assert app_configs['FLASK_LOG_LEVEL'] == 'DEBUG'\n assert app_configs['SERVER_NAME'] is None\n assert app_configs['SERVER_HOST'] == '127.0.0.1'\n assert app_configs['SERVER_PORT'] == 5001\n assert app_configs['ENV'] == 'testing'\n assert app_configs['DEBUG'] is False\n assert app_configs['TESTING'] is True\n assert app_configs['UNIT_TESTING'] is True",
"def test_expected_configurations_parameters(self):\n allowed_attrs = [\"configuration-parameters\"]\n instance_info.dbaas.configuration_parameters.parameters(\n instance_info.dbaas_datastore,\n instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n attrcheck = AttrCheck()\n config_parameters_dict = json.loads(body.decode())\n attrcheck.contains_allowed_attrs(\n config_parameters_dict, allowed_attrs,\n msg=\"Configurations parameters\")\n # sanity check that a few options are in the list\n config_params_list = config_parameters_dict['configuration-parameters']\n config_param_keys = []\n for param in config_params_list:\n config_param_keys.append(param['name'])\n expected_configs = self.expected_default_datastore_configs()\n expected_config_params = expected_configs.get('parameters_list')\n # check for duplicate configuration parameters\n msg = \"check for duplicate configuration parameters\"\n assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)\n for expected_config_item in expected_config_params:\n assert_true(expected_config_item in config_param_keys)",
"def test_get_configuration(self, clean_mongo, test_case):\n self.logger.info(\"RUN: %s\", test_case[\"name\"])\n\n uuidv4 = str(uuid.uuid4())\n tenant, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant = create_org(tenant, username, password, \"enterprise\")\n update_tenant(tenant.id, addons=[\"configure\"])\n login(tenant.users[0], test_case[\"use_personal_access_token\"])\n\n admin_user = tenant.users[0]\n test_case[\"user\"][\"name\"] = test_case[\"user\"][\"name\"].replace(\"UUID\", uuidv4)\n if test_case[\"roles\"]:\n create_roles(tenant.users[0].token, test_case[\"roles\"])\n test_user = create_user(tid=tenant.id, **test_case[\"user\"])\n login(test_user, test_case[\"use_personal_access_token\"])\n\n # Initialize tenant's devices\n grouped_devices = setup_tenant_devices(tenant, test_case[\"device_groups\"])\n\n deviceconf_MGMT = ApiClient(deviceconfig.URL_MGMT)\n\n device_id = grouped_devices[test_case[\"view_group\"]][0].id\n\n # set the configuration using admin account\n rsp = deviceconf_MGMT.with_auth(admin_user.token).call(\n \"PUT\",\n deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id),\n body={\"foo\": \"bar\"},\n )\n assert rsp.status_code == 204, rsp.text\n\n # Attempt to get configuration\n rsp = deviceconf_MGMT.with_auth(test_user.token).call(\n \"GET\", deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id)\n )\n assert rsp.status_code == test_case[\"get_configuration_status_code\"], rsp.text\n self.logger.info(\"PASS: %s\" % test_case[\"name\"])",
"def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing",
"def test_configs_engine():\n app = AppBuilder(CONFIG1).build_app()\n assert isinstance(app.engine, CustomEngine)\n assert app.engine.Formatter is MockFormat\n assert app.engine.extract_timeout == 20\n assert app.engine.extract_tmp_dir == \"/tmp\"",
"def testGetConfig():\n configs = GetConfig()\n # print(configs.host_ip)\n # print(configs.proxy_local)\n \n # print(configs.proxy_online)\n # print(configs.user_img_url)\n # print(configs.user_login_url)\n print(configs.user_start_id)\n\n # assert isinstance(configs.proxy_getter_functions, list)\n # print(configs.proxy_getter_functions)",
"def test_with_localsite_in_data_and_instance(self):\n config = IntegrationConfig.objects.create(\n integration_id=self.integration.integration_id)\n\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.local_site_1_group.pk],\n 'group': self.local_site_1_group.pk,\n 'local_site': self.local_site_1.pk,\n },\n instance=config)\n self.assertTrue(form.is_valid())\n\n new_config = form.save()\n self.assertEqual(config.pk, new_config.pk)\n self.assertEqual(new_config.local_site, self.local_site_1)",
"def test_get_configs():\n\n configs = application_services.get_configs()\n\n assert isinstance(configs, dict)\n assert len(configs) > 0",
"def get_instance():\n if not TestConfiguration._instance:\n TestConfiguration._instance = TestConfiguration()\n return TestConfiguration._instance",
"def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n 
LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)",
"def settings():\n return SettingsMock.instance()",
"def test_config_metadata(self):\n result = self.driver.get_config_metadata()\n self.assert_(isinstance(result, dict))\n\n self.assert_(isinstance(result[ConfigMetadataKey.DRIVER], dict))\n self.assert_(isinstance(result[ConfigMetadataKey.COMMANDS], dict))\n self.assert_(isinstance(result[ConfigMetadataKey.PARAMETERS], dict))\n\n self.assertEquals(len(result[ConfigMetadataKey.DRIVER]), 1)\n self.assertEquals(result[ConfigMetadataKey.DRIVER],\n {DriverDictKey.VENDOR_SW_COMPATIBLE: True})\n\n # Check a few in the cmd list...the leaves in the structure are\n # tested in the cmd dict test cases\n self.assertEquals(len(result[ConfigMetadataKey.COMMANDS]), 2)\n self.assert_(\"cmd1\" in result[ConfigMetadataKey.COMMANDS].keys())\n self.assert_(\"cmd2\" in result[ConfigMetadataKey.COMMANDS].keys())\n\n # Check a few in the param list...the leaves in the structure are\n # tested in the param dict test cases\n self.assertEquals(len(result[ConfigMetadataKey.PARAMETERS]), 4)\n self.assert_(\"foo\" in result[ConfigMetadataKey.PARAMETERS].keys())\n self.assert_(\"bar\" in result[ConfigMetadataKey.PARAMETERS].keys())\n self.assert_(\"baz\" in result[ConfigMetadataKey.PARAMETERS].keys())\n self.assert_(\"bat\" in result[ConfigMetadataKey.PARAMETERS].keys())",
"def get_test_config():\n config = get_config()\n config.batch_size = 2\n config.eval_batch_size = 2\n config.eval_num = 2\n config.eval_avg_num = 1\n config.num_train_steps = 2\n config.log_loss_every_steps = 1\n config.eval_every_steps = 1\n config.checkpoint_every_steps = 1\n config.df_dim = 16\n config.gf_dim = 16\n config.z_dim = 8\n config.show_num = 4\n config.num_epochs = 1\n config.shuffle_buffer_size = 10\n return config",
"def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing",
"def get_app_instances_configs(self, instance_id=None, instance_alias=None, app_full_name=None):\n try:\n if instance_id:\n return filter(lambda app_inst: app_inst[\"id\"] == instance_id, self.app_instances_configs)\n elif instance_alias:\n return filter(lambda app_inst: app_inst[\"alias\"] == instance_alias, self.app_instances_configs)\n elif app_full_name:\n return filter(lambda app_inst: app_inst[\"app_full_name\"] == app_full_name, self.app_instances_configs)\n except Exception as ex:\n log.error(\"Instance can't be found because of error %s\" % ex)\n return None",
"def test_utils_get_backend_instance(options, expected):\n\n class DummyBackendSettings(InstantiableSettingsItem):\n \"\"\"Represents a dummy backend setting.\"\"\"\n\n foo: str = \"foo\" # pylint: disable=disallowed-name\n\n def get_instance(self, **init_parameters): # pylint: disable=no-self-use\n \"\"\"Returns the init_parameters.\"\"\"\n return init_parameters\n\n class TestBackendType(BaseModel):\n \"\"\"A backend type including the DummyBackendSettings.\"\"\"\n\n DUMMY: DummyBackendSettings = DummyBackendSettings()\n\n backend_instance = ralph_utils.get_backend_instance(\n TestBackendType(), \"dummy\", options\n )\n assert isinstance(backend_instance, dict)\n assert backend_instance == expected",
"def test_renderer_discovers_special_config(self):\n datastore = Mock(spec=DatastoreVersion)\n datastore.datastore_name = 'mysql'\n datastore.name = 'mysql-test'\n datastore.manager = 'mysql'\n config = template.SingleInstanceConfigTemplate(datastore,\n self.flavor_dict,\n self.server_id)\n self.validate_template(config.render(), \"hyper\",\n {'ram': 0}, self.server_id)",
"def test_configuration(self):\n self.assertEqual(self.Test.adapter_config['write'],\n { 'adapter': TestAdapter, 'foo': 'bar' })",
"def test_data():\n db = current_app.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n if Site.query.count():\n return # DB not empty\n entries = [\n Site(site_id=1,\n site_name='Site1',\n site_desc='First Test Site',\n site_owner=1,\n user_ca_cert='USERCERT1',\n service_ca_cert='',\n auth_type=0,\n auth_uri='localhost:49998',\n public=False,\n def_path='/~'),\n Site(site_id=2,\n site_name='Site2',\n site_desc='Second Test Site',\n site_owner=123,\n user_ca_cert='USERCERT2',\n service_ca_cert='SERVICECERT2',\n auth_type=0,\n auth_uri='localhost:49998',\n public=True,\n def_path='/project'),\n Endpoint(ep_id=1,\n site_id=1,\n ep_uri='localhost:49999'),\n Endpoint(ep_id=2,\n site_id=1,\n ep_uri='localhost2:49999'),\n Endpoint(ep_id=3,\n site_id=2,\n ep_uri='localhost:50000'),\n Endpoint(ep_id=4,\n site_id=2,\n ep_uri='localhost2:50000'),\n Site(site_id=3,\n site_name='CloudSite1',\n site_desc='Testing site in cloud (1)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=5,\n site_id=3,\n ep_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=4,\n site_name='CloudSite2',\n site_desc='Testing site in cloud (2)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=6,\n site_id=4,\n ep_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=5,\n site_name='UKI-LT2-IC-HEP',\n site_desc='Imperial College GridPP Site',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=1,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/pnfs/hep.ph.ic.ac.uk/data'),\n Endpoint(ep_id=7,\n site_id=5,\n ep_uri='gfe02.grid.hep.ph.ic.ac.uk:2811'),\n Site(site_id=6,\n site_name='NERSC DTN',\n site_desc='NERSC DTN Service',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=0,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=8,\n site_id=6,\n ep_uri='dtn01.nersc.gov:2811'),\n ]\n for entry in entries:\n db.session.add(entry)\n db.session.commit()",
"def configuration(request):\n config = testing.setUp(settings={\n 'sqlalchemy.url': 'postgres:///test_database'\n })\n config.include(\"space_rocks.models\")\n\n def teardown():\n testing.tearDown()\n\n request.addfinalizer(teardown)\n return config",
"def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())",
"def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)"
] | [
"0.6595712",
"0.641086",
"0.63042694",
"0.59832126",
"0.5969446",
"0.5716063",
"0.56343365",
"0.55800563",
"0.549223",
"0.5459904",
"0.5443902",
"0.52584755",
"0.52367705",
"0.52236694",
"0.5125736",
"0.5114252",
"0.51125515",
"0.5100617",
"0.5088254",
"0.50832015",
"0.50808233",
"0.5077696",
"0.506221",
"0.5059841",
"0.50596046",
"0.50508493",
"0.5044264",
"0.50408757",
"0.5031122",
"0.5022892"
] | 0.82026744 | 0 |
Returns the expected test configurations for the default datastore defined in the Test Config as dbaas_datastore. | def expected_default_datastore_configs():
    default_datastore = CONFIG.get('dbaas_datastore', None)
    datastore_test_configs = CONFIG.get(default_datastore, {})
    return datastore_test_configs.get("configurations", {}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def expected_instance_datastore_configs(instance_id):\n instance = instance_info.dbaas.instances.get(instance_id)\n datastore_type = instance.datastore['type']\n datastore_test_configs = CONFIG.get(datastore_type, {})\n return datastore_test_configs.get(\"configurations\", {})",
"def test_valid_configurations_create(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('valid_values'))\n expected_values = json.loads(values)\n result = instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC,\n datastore=instance_info.dbaas_datastore,\n datastore_version=instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 200)\n with TypeCheck('Configuration', result) as configuration:\n configuration.has_field('name', str)\n configuration.has_field('description', str)\n configuration.has_field('values', dict)\n configuration.has_field('datastore_name', str)\n configuration.has_field('datastore_version_id', str)\n configuration.has_field('datastore_version_name', str)\n global configuration_info\n configuration_info = result\n assert_equal(configuration_info.name, CONFIG_NAME)\n assert_equal(configuration_info.description, CONFIG_DESC)\n assert_equal(configuration_info.values, expected_values)",
"def test_all_configs_values():\n\n app_configs = application_services.get_configs()\n\n assert app_configs['TITLE'] == 'pyrin_unit_tests'\n assert app_configs['ENCODING'] == 'utf-8'\n assert app_configs['FLASK_LOG_LEVEL'] == 'DEBUG'\n assert app_configs['SERVER_NAME'] is None\n assert app_configs['SERVER_HOST'] == '127.0.0.1'\n assert app_configs['SERVER_PORT'] == 5001\n assert app_configs['ENV'] == 'testing'\n assert app_configs['DEBUG'] is False\n assert app_configs['TESTING'] is True\n assert app_configs['UNIT_TESTING'] is True",
"def test_expected_configurations_parameters(self):\n allowed_attrs = [\"configuration-parameters\"]\n instance_info.dbaas.configuration_parameters.parameters(\n instance_info.dbaas_datastore,\n instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n attrcheck = AttrCheck()\n config_parameters_dict = json.loads(body.decode())\n attrcheck.contains_allowed_attrs(\n config_parameters_dict, allowed_attrs,\n msg=\"Configurations parameters\")\n # sanity check that a few options are in the list\n config_params_list = config_parameters_dict['configuration-parameters']\n config_param_keys = []\n for param in config_params_list:\n config_param_keys.append(param['name'])\n expected_configs = self.expected_default_datastore_configs()\n expected_config_params = expected_configs.get('parameters_list')\n # check for duplicate configuration parameters\n msg = \"check for duplicate configuration parameters\"\n assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)\n for expected_config_item in expected_config_params:\n assert_true(expected_config_item in config_param_keys)",
"def get_test_settings():\n from youtube_podcast_api.config import Settings\n settings = Settings()\n settings.db_path = \"./sql_test.db\"\n return settings",
"def get_default_dataset_config():\n code_path = utils.get_code_path()\n default_config = {\n 'GT_FOLDER': os.path.join(code_path, 'data/gt/kitti/kitti_mots_val'), # Location of GT data\n 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/kitti/kitti_mots_val'), # Trackers location\n 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)\n 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)\n 'CLASSES_TO_EVAL': ['car', 'pedestrian'], # Valid: ['car', 'pedestrian']\n 'SPLIT_TO_EVAL': 'val', # Valid: 'training', 'val'\n 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped\n 'PRINT_CONFIG': True, # Whether to print current config\n 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER\n 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER\n 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL\n 'SEQMAP_FOLDER': None, # Where seqmaps are found (if None, GT_FOLDER)\n 'SEQMAP_FILE': None, # Directly specify seqmap file (if none use seqmap_folder/split_to_eval.seqmap)\n 'SEQ_INFO': None, # If not None, directly specify sequences to eval and their number of timesteps\n 'GT_LOC_FORMAT': '{gt_folder}/label_02/{seq}.txt', # format of gt localization\n }\n return default_config",
"def get_test_config():\n config = get_config()\n config.batch_size = 2\n config.eval_batch_size = 2\n config.eval_num = 2\n config.eval_avg_num = 1\n config.num_train_steps = 2\n config.log_loss_every_steps = 1\n config.eval_every_steps = 1\n config.checkpoint_every_steps = 1\n config.df_dim = 16\n config.gf_dim = 16\n config.z_dim = 8\n config.show_num = 4\n config.num_epochs = 1\n config.shuffle_buffer_size = 10\n return config",
"def test01_default_values(self):\n config = Config()\n self.assertEqual(config.max_token_count, 1024 * 1023)\n self.assertEqual(config.max_buffer_size, 64_000_000)\n self.assertEqual(config.max_token_size, 64_000_000)\n self.assertEqual(config.enforce_schema, False)\n self.assertEqual(config.id_type, 'STRING')\n self.assertEqual(config.skip_invalid_nodes, False)\n self.assertEqual(config.skip_invalid_edges, False)\n self.assertEqual(config.store_node_identifiers, False)\n self.assertEqual(config.separator, ',')\n self.assertEqual(config.quoting, 3)",
"def test_set_databases(self):\n Config.set_databases({\n 'default': {\n 'url': 'bolt://cypher-db:7687',\n 'username': 'neo4j',\n 'password': 'cypher',\n },\n })\n\n default_database = Config.databases.get('default', None)\n self.assertIsNotNone(default_database)",
"def test_get_configuration(self, clean_mongo, test_case):\n self.logger.info(\"RUN: %s\", test_case[\"name\"])\n\n uuidv4 = str(uuid.uuid4())\n tenant, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant = create_org(tenant, username, password, \"enterprise\")\n update_tenant(tenant.id, addons=[\"configure\"])\n login(tenant.users[0], test_case[\"use_personal_access_token\"])\n\n admin_user = tenant.users[0]\n test_case[\"user\"][\"name\"] = test_case[\"user\"][\"name\"].replace(\"UUID\", uuidv4)\n if test_case[\"roles\"]:\n create_roles(tenant.users[0].token, test_case[\"roles\"])\n test_user = create_user(tid=tenant.id, **test_case[\"user\"])\n login(test_user, test_case[\"use_personal_access_token\"])\n\n # Initialize tenant's devices\n grouped_devices = setup_tenant_devices(tenant, test_case[\"device_groups\"])\n\n deviceconf_MGMT = ApiClient(deviceconfig.URL_MGMT)\n\n device_id = grouped_devices[test_case[\"view_group\"]][0].id\n\n # set the configuration using admin account\n rsp = deviceconf_MGMT.with_auth(admin_user.token).call(\n \"PUT\",\n deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id),\n body={\"foo\": \"bar\"},\n )\n assert rsp.status_code == 204, rsp.text\n\n # Attempt to get configuration\n rsp = deviceconf_MGMT.with_auth(test_user.token).call(\n \"GET\", deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id)\n )\n assert rsp.status_code == test_case[\"get_configuration_status_code\"], rsp.text\n self.logger.info(\"PASS: %s\" % test_case[\"name\"])",
"def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})",
"def test_default_config():\n\n from app.config import Default\n \n assert Default.DEBUG == False\n assert Default.TESTING == False\n assert Default.JWT_BLACKLIST_ENABLED == True\n assert Default.JWT_BLACKLIST_TOKEN_CHECKS == ['access', 'refresh']\n assert Default.SQLALCHEMY_TRACK_MODIFICATIONS == False",
"def get_test_config(cls, cluster, role, env, job, filler=''):\n return cls.CONFIG_BASE % {'job': job, 'role': role, 'env': env, 'cluster': cluster,\n 'inner': filler}",
"def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']",
"def test_300_keystone_default_config(self):\n u.log.debug('Checking keystone config file...')\n unit = self.keystone_sentry\n conf = '/etc/keystone/keystone.conf'\n ks_ci_rel = unit.relation('identity-service',\n 'cinder:identity-service')\n my_ks_rel = self.pxc_sentry.relation('shared-db',\n 'keystone:shared-db')\n db_uri = \"mysql://{}:{}@{}/{}\".format('keystone',\n my_ks_rel['password'],\n my_ks_rel['db_host'],\n 'keystone')\n expected = {\n 'DEFAULT': {\n 'debug': 'False',\n 'admin_token': ks_ci_rel['admin_token'],\n 'use_syslog': 'False',\n 'log_config_append': '/etc/keystone/logging.conf',\n 'public_endpoint': u.valid_url, # get specific\n 'admin_endpoint': u.valid_url, # get specific\n },\n 'extra_headers': {\n 'Distribution': 'Ubuntu'\n },\n 'database': {\n 'connection': db_uri,\n 'idle_timeout': '200'\n }\n }\n\n if self._get_openstack_release() < self.trusty_mitaka:\n expected['DEFAULT']['verbose'] = 'False'\n expected['DEFAULT']['log_config'] = \\\n expected['DEFAULT']['log_config_append']\n del expected['DEFAULT']['log_config_append']\n\n if self._get_openstack_release() >= self.trusty_kilo and \\\n self._get_openstack_release() < self.trusty_mitaka:\n # Kilo and Liberty\n expected['eventlet_server'] = {\n 'admin_bind_host': '0.0.0.0',\n 'public_bind_host': '0.0.0.0',\n 'admin_port': '35347',\n 'public_port': '4990',\n }\n elif self._get_openstack_release() <= self.trusty_icehouse:\n # Juno and earlier\n expected['DEFAULT'].update({\n 'admin_port': '35347',\n 'public_port': '4990',\n 'bind_host': '0.0.0.0',\n })\n\n for section, pairs in expected.iteritems():\n ret = u.validate_config_data(unit, conf, section, pairs)\n if ret:\n message = \"keystone config error: {}\".format(ret)\n amulet.raise_status(amulet.FAIL, msg=message)",
"def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")",
"def test_get_all_configurations(self):\n\n time_series = ['test-all-conf-1', 'test-all-conf-2', 'test-all-conf-3']\n [timeserie_configuration.get_timeserie_configure(self.get_local_dynamo_cli(),\n ts) for ts in time_series]\n\n all_configurations = timeserie_configuration.get_all_configurations(\n self.get_local_dynamo_cli())\n self.assertEquals(3, len(all_configurations))\n self.assertTrue(all([conf.default for conf in all_configurations]))",
"def get_test_config() -> Config:\n # overwrite some settings for unit tests\n args = dict(\n datapath=os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata')),\n debug=True\n )\n return Config(**args)",
"def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config",
"def configs() -> Path:\n return TEST_ROOT.parent / \"fixtures\" / \"configs\"",
"def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing",
"def test_renderer_discovers_special_config(self):\n datastore = Mock(spec=DatastoreVersion)\n datastore.datastore_name = 'mysql'\n datastore.name = 'mysql-test'\n datastore.manager = 'mysql'\n config = template.SingleInstanceConfigTemplate(datastore,\n self.flavor_dict,\n self.server_id)\n self.validate_template(config.render(), \"hyper\",\n {'ram': 0}, self.server_id)",
"def get_test_preferences(self, name: str) -> Dict[str, Any]:\n return self.raw_config.get(name, {})",
"def datastore_options(self) -> Optional['outputs.PreventionJobTriggerInspectJobStorageConfigDatastoreOptions']:\n return pulumi.get(self, \"datastore_options\")",
"def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder",
"def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()",
"def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing",
"def test_configuration(self):\n self.assertEqual(self.Test.adapter_config['write'],\n { 'adapter': TestAdapter, 'foo': 'bar' })",
"def get_default_datastore(self):\n try:\n return self.client.list_datastores()[0]['datastore']\n except VMwareError as e:\n raise VMwareBackendError(e)",
"def get_default_config(self):\n config = super(DiskHealthCollector, self).get_default_config()\n config.update({\n 'enabled': 'True',\n 'devices': ('PhysicalDrive[0-9]+$'\n + '|md[0-9]+$'\n + '|sd[a-z]+[0-9]*$'\n + '|x?vd[a-z]+[0-9]*$'\n + '|disk[0-9]+$'\n + '|dm\\-[0-9]+$'),\n 'fs_types': ','.join(self.SUPPORTED_FS_TYPES),\n 'raw_stats_only': False,\n 'test_file_name': self.TEST_FILE_NAME,\n 'test_file_size': self.TEST_FILE_SIZE\n })\n return config"
] | [
"0.778061",
"0.63270247",
"0.6234825",
"0.6132579",
"0.5991797",
"0.5882307",
"0.58755237",
"0.58285725",
"0.58114976",
"0.5798612",
"0.5786422",
"0.5751315",
"0.57506293",
"0.57444537",
"0.5743285",
"0.5723634",
"0.5672421",
"0.56719977",
"0.566023",
"0.5647218",
"0.5636959",
"0.5634379",
"0.56159997",
"0.5607275",
"0.5600682",
"0.5595318",
"0.5590905",
"0.557601",
"0.55564046",
"0.5554896"
] | 0.9044427 | 0 |
Test create configurations with invalid values. | def test_configurations_create_invalid_values(self):
values = '{"this_is_invalid": 123}'
try:
instance_info.dbaas.configurations.create(
CONFIG_NAME,
values,
CONFIG_DESC)
except exceptions.UnprocessableEntity:
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 422) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)",
"def test_configurations_create_value_out_of_bounds(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('out_of_bounds_over'))\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)\n values = json.dumps(expected_configs.get('out_of_bounds_under'))\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)",
"def test_valid_configurations_create(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('valid_values'))\n expected_values = json.loads(values)\n result = instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC,\n datastore=instance_info.dbaas_datastore,\n datastore_version=instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 200)\n with TypeCheck('Configuration', result) as configuration:\n configuration.has_field('name', str)\n configuration.has_field('description', str)\n configuration.has_field('values', dict)\n configuration.has_field('datastore_name', str)\n configuration.has_field('datastore_version_id', str)\n configuration.has_field('datastore_version_name', str)\n global configuration_info\n configuration_info = result\n assert_equal(configuration_info.name, CONFIG_NAME)\n assert_equal(configuration_info.description, CONFIG_DESC)\n assert_equal(configuration_info.values, expected_values)",
"def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_invalid_config() -> None:\n config = {\"statsd\": {\"host1\": \"host1\"}}\n\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(None)\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(config)",
"def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})",
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()",
"def test_construct_3_bad_bootsraps(self):\n with self.assertRaises(KeyError):\n configerus.new_config(bootstraps=[\"I do not exist\"])",
"def test_config():\n\n assert Config.get(\"abc\") is None\n assert Config.get(1234) is None\n\n for key in (\"coerce\", \"debug\"):\n assert Config.get(key) is True\n Config.set(key, False)\n assert Config.get(key) is False\n\n with pytest.raises(ValueError):\n Config.set(key, \"something\")\n\n with pytest.raises(ValueError):\n Config.set(key, int)",
"def test_config_must_exist(cls, values):\n configs = [c.config for c in values.get('configs')]\n for test in values.get('tests'):\n if test.config not in configs:\n raise ValueError(\n f\"Test '{test.test}' gave the config '{test.config}', but \"\n \"this config does not exist in the file \"\n f\"'{values.get('yaml')}'. Configs detected : {configs} \\n\")\n return values",
"def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)",
"def test_instantiate_no_target(self):\n # create test configs\n test_configs = [\n {},\n {\"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises ValueError for each test config\n for test_conf in test_configs:\n self.assertRaises(ValueError, instantiate, test_conf)",
"def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args",
"def test_init_validation(self):\n\n # Try minimal acceptable configuration\n ExecutionConfiguration()\n\n # Invalid version\n config = {'version': 'BAD'}\n self.assertRaises(InvalidExecutionConfiguration, ExecutionConfiguration, config)",
"def test_invalid_configuration(self):\n\n config = copy.deepcopy(self.configuration)\n config['version'] = 'BAD'\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : config\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)",
"def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")",
"def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])",
"def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']",
"def test_config_wrong_config(self):\n test_data_1 = (\"[gnupg_missing]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"[data]\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\")\n test_data_2 = (\"[gnupg]\\n\"\n \"recipients_missing = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"[data]\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data_1)\n config = Config(\"test_config.conf\")\n self.assertRaises(\n ConfigError, config.check, \"gnupg\", [\"recipients\", \"signer\"])\n file(\"test_config.conf\", \"wb\").write(test_data_2)\n config = Config(\"test_config.conf\")\n self.assertRaises(\n ConfigError, config.check, \"gnupg\", [\"recipients\", \"signer\"])\n os.remove(\"test_config.conf\")",
"def test_parameter_redundancy_invalid(self, mock_ghn, mock_grnam,\n mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_redundancy = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_redundancy = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def test_bogus_configs():\n with pytest.raises(FileNotFoundError):\n phmdoctest.main.generate_using(config_file=Path(\"bogus.toml\"))\n with pytest.raises(ValueError):\n # Can't generate from a .py file.\n phmdoctest.main.generate_using(config_file=Path(\"setup.py\"))",
"def test_validate_failure_bad_config(self, value):\n sch = scheme.Scheme()\n with pytest.raises(errors.SchemeValidationError):\n sch.validate(value)",
"def test_check_required_fail():\n settings = SettingsModel()\n\n with pytest.raises(InvalidSettings):\n settings.check()",
"def test_invalid_machine():\n config = load_json_fixture(\"basic-addon-config.json\")\n\n config[\"machine\"] = [\n \"intel-nuc\",\n \"raspberrypi3\",\n \"raspberrypi4-64\",\n \"raspberrypi4\",\n \"tinkerxy\",\n ]\n\n with pytest.raises(vol.Invalid):\n assert vd.SCHEMA_ADDON_CONFIG(config)\n\n config[\"machine\"] = [\n \"intel-nuc\",\n \"intel-nuc\",\n ]\n\n with pytest.raises(vol.Invalid):\n assert vd.SCHEMA_ADDON_CONFIG(config)",
"def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def test_safe(self):\n\n conf = configuration(\n category(\n 'test',\n Parameter('test', svalue='=open')\n )\n )\n\n configurable = Configurable(conf=conf, autoconf=False)\n\n self.assertRaises(\n Parameter.Error,\n configurable.applyconfiguration,\n targets=configurable, paths='test'\n )",
"def test_invalid_adapter_opts(self):\n self.oslo_config_dict['heat'] = {\n 'interface': 'public',\n 'valid_interfaces': 'private',\n }\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): interface and \"\n \"valid_interfaces are mutually exclusive.\",\n )",
"def test_bad_config_recovery(mock_empty_os_environ):\n\n def check(d):\n if d and \"wrong\" in d:\n raise KeyError(\"Invalid config\")\n return d\n\n climate = core.Climate(prefix=\"this\", settings_file_suffix=\"suffix\", parser=check)\n assert dict(climate.settings) == {}\n\n # Try to set incorrect config\n with pytest.raises(KeyError):\n climate.update({\"wrong\": 2})\n assert dict(climate.settings) == {}, \"Setting should not have been updated\"\n assert climate._updates == [], \"No external data should have been set.\"\n\n # Updating with other fields will still trigger the error\n climate.update({\"right\": 2})\n assert dict(climate.settings) == {\"right\": 2}\n assert climate._updates == [{\"right\": 2}], \"External data should have been set.\"",
"def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')"
] | [
"0.7433865",
"0.73964936",
"0.71891207",
"0.715679",
"0.7153514",
"0.70355123",
"0.6980659",
"0.6954836",
"0.6950144",
"0.69497925",
"0.6934946",
"0.6908571",
"0.68892264",
"0.68862695",
"0.6879564",
"0.68716174",
"0.6868052",
"0.6838863",
"0.68274814",
"0.67229503",
"0.67174137",
"0.6700938",
"0.6689465",
"0.6687932",
"0.66439396",
"0.66322243",
"0.6630166",
"0.65905887",
"0.6585142",
"0.65724045"
] | 0.83784807 | 0 |
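The row above checks that the configurations API refuses a payload containing an unknown key by expecting HTTP 422. As a rough, self-contained illustration of the same pattern outside the original test harness, the sketch below posts an invalid configuration to a hypothetical REST endpoint with `requests`; the URL, payload shape, and the assumption that the service validates configuration keys are all illustrative. Unlike a bare try/except, the assertion here also fails if the server accepts the bad payload.

```python
# Minimal sketch (not the test harness used above): assert that a REST API
# rejects an invalid configuration payload with HTTP 422.
# The endpoint URL and payload shape are hypothetical.
import requests

API_URL = "http://localhost:8779/v1.0/configurations"  # hypothetical endpoint


def test_create_configuration_rejects_unknown_key():
    payload = {
        "configuration": {
            "name": "test_config",
            "description": "invalid values",
            "values": {"this_is_invalid": 123},  # key not in the validation rules
        }
    }
    resp = requests.post(API_URL, json=payload, timeout=10)
    # Fail loudly if the server accepted the bad payload instead of passing
    # silently when no error occurs.
    assert resp.status_code == 422, f"expected 422, got {resp.status_code}"
```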
Test create configuration with invalid value type. | def test_configurations_create_invalid_value_type(self):
values = '{"key_buffer_size": "this is a string not int"}'
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_configurations_create_invalid_values(self):\n values = '{\"this_is_invalid\": 123}'\n try:\n instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC)\n except exceptions.UnprocessableEntity:\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 422)",
"def test_validate_failure_bad_config(self, value):\n sch = scheme.Scheme()\n with pytest.raises(errors.SchemeValidationError):\n sch.validate(value)",
"def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)",
"def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with pytest.raises(InputError):\n _check_input_config({key: test_value})",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_invalid_config() -> None:\n config = {\"statsd\": {\"host1\": \"host1\"}}\n\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(None)\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(config)",
"def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")",
"def test_configurations_create_value_out_of_bounds(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('out_of_bounds_over'))\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)\n values = json.dumps(expected_configs.get('out_of_bounds_under'))\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)",
"def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})",
"def test_invalid_configuration(self):\n\n config = copy.deepcopy(self.configuration)\n config['version'] = 'BAD'\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : config\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)",
"def test_creation_dict():\n with pytest.raises(ValueError) as __:\n value = dict()\n __ = param.Integer(value=value)",
"def test_config():\n\n assert Config.get(\"abc\") is None\n assert Config.get(1234) is None\n\n for key in (\"coerce\", \"debug\"):\n assert Config.get(key) is True\n Config.set(key, False)\n assert Config.get(key) is False\n\n with pytest.raises(ValueError):\n Config.set(key, \"something\")\n\n with pytest.raises(ValueError):\n Config.set(key, int)",
"def test_valid_configurations_create(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('valid_values'))\n expected_values = json.loads(values)\n result = instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC,\n datastore=instance_info.dbaas_datastore,\n datastore_version=instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 200)\n with TypeCheck('Configuration', result) as configuration:\n configuration.has_field('name', str)\n configuration.has_field('description', str)\n configuration.has_field('values', dict)\n configuration.has_field('datastore_name', str)\n configuration.has_field('datastore_version_id', str)\n configuration.has_field('datastore_version_name', str)\n global configuration_info\n configuration_info = result\n assert_equal(configuration_info.name, CONFIG_NAME)\n assert_equal(configuration_info.description, CONFIG_DESC)\n assert_equal(configuration_info.values, expected_values)",
"def test_validate_type_failure(self, field_type, value):\n opt = scheme.Option('test-option', field_type=field_type)\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )",
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()",
"def test_creation_notallow_none():\n with pytest.raises(ValueError) as __:\n value = None\n __ = param.Integer(value=value, allow_None=False)",
"def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args",
"def test_init_validation(self):\n\n # Try minimal acceptable configuration\n ExecutionConfiguration()\n\n # Invalid version\n config = {'version': 'BAD'}\n self.assertRaises(InvalidExecutionConfiguration, ExecutionConfiguration, config)",
"def test_validate_bad_data(self, value):\n opt = scheme.DictOption('test-opt', scheme.Scheme())\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_invalid_input_config(self):\n msg1 = 'Must raise `TypeError` when input `config` is invalid.'\n msg2 = 'Inconsistent error message.'\n examples = (\n False, True, 0, 1, -1, 0.0, 1.0, math.nan, -math.nan, math.inf,\n -math.inf, 0j, 1j, '', b'', (), [], {}, set(), object(),\n lambda x: x, type, None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(TypeError, msg=msg1) as ctx_man:\n lmp.util.load_tokenizer_by_config(\n checkpoint=self.checkpoint,\n config=invalid_input\n )\n\n self.assertEqual(\n ctx_man.exception.args[0],\n '`config` must be an instance of `lmp.config.BaseConfig`.',\n msg=msg2\n )",
"def test_should_raise_error_if_type_is_invalid(self):\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement({'type': 'sugar'})",
"def test_set_invalid_project_type(self):\n setting_name = 'project_category_bool_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': True,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)",
"def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']",
"def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})",
"def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5})",
"def test_validate_bad_data(self, value):\n opt = scheme.ListOption('test-opt')\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"def test__validate_channels__type_error(input_value):\n validate_channels(input_value)",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)"
] | [
"0.79776704",
"0.71556807",
"0.7127362",
"0.7073835",
"0.70391345",
"0.6960719",
"0.6936632",
"0.6775403",
"0.6733872",
"0.67331606",
"0.6680334",
"0.665938",
"0.6640967",
"0.6639813",
"0.66186374",
"0.66141385",
"0.6563347",
"0.65561086",
"0.65486705",
"0.6546909",
"0.6539906",
"0.65045446",
"0.64892036",
"0.64613825",
"0.6432098",
"0.64118224",
"0.6397514",
"0.6387448",
"0.6367932",
"0.6358159"
] | 0.85598314 | 0 |
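The document in this row leans on an `assert_unprocessable` helper that the row itself never defines. Below is a minimal sketch of what such a helper might look like, assuming the client signals an HTTP 422 by raising an `UnprocessableEntity` exception; the real helper in the test suite may differ.

```python
# One plausible shape for the assert_unprocessable helper used above.
# This is an assumption: UnprocessableEntity is a stand-in for whatever
# exception the actual client raises on an HTTP 422 response.
class UnprocessableEntity(Exception):
    """Raised by the (hypothetical) client when the API returns HTTP 422."""


def assert_unprocessable(func, *args, **kwargs):
    """Call func and require that it fails with UnprocessableEntity (422)."""
    try:
        func(*args, **kwargs)
    except UnprocessableEntity:
        return  # expected: the API refused the request
    raise AssertionError("expected an HTTP 422 (UnprocessableEntity) response")
```

With a helper shaped like this, the call in the document reads as: invoke `configurations.create` with a string value for an integer option and require that the API answers 422.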
Test create configuration with value out of bounds. | def test_configurations_create_value_out_of_bounds(self):
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('out_of_bounds_over'))
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
values = json.dumps(expected_configs.get('out_of_bounds_under'))
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_creation_outside_bounds():\n with pytest.raises(ValueError) as __:\n value = 42\n __ = param.Integer(value=value, hardbounds=[0, 41])",
"def test_creation_incorrect_hardbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, hardbounds=[0, 10, 20])",
"def test_creation_incorrect_softbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, softbounds=[0, 10, 20])",
"def test_creation_incorrect_change_hardbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, hardbounds=[0, 10])\n int_a.hardbounds = [0, 10, 20]",
"def test_init_chunk_size_field_below_range(self):\n test_config = TestConfig(chunk_size=-1)\n with self.assertRaises(ValidationError):\n test_config.clean_fields()",
"def test_creation_bounds_not_inclusive():\n with pytest.raises(ValueError) as __:\n value = -42\n __ = param.Integer(value=value, hardbounds=[-42, 100], inclusive_bounds=[False, False])",
"def test_init_minimum_gap_field_below_range(self):\n test_config = TestConfig(minimum_gap=-1)\n with self.assertRaises(ValidationError):\n test_config.clean_fields()",
"def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)",
"def test_creation_incorrect_change_softbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, softbounds=[0, 10])\n int_a.softbounds = [0, 10, 20]",
"def test_configurations_create_invalid_values(self):\n values = '{\"this_is_invalid\": 123}'\n try:\n instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC)\n except exceptions.UnprocessableEntity:\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 422)",
"def test_invalid_max_depth_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'max_depth: -1\\n'\n ) \n with self.assertRaises(mini_spider.ConfigurationException):\n mini_spider.parse_configuration(self.configuration_file_path)",
"def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Integer(\"yolo\", \"uniform\", -3, 2, default_value=4)",
"def test_init_bytes_field_below_range(self):\n test_config = TestConfig(bytes=-1)\n with self.assertRaises(ValidationError):\n test_config.clean_fields()",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)",
"def test_construct_3_bad_bootsraps(self):\n with self.assertRaises(KeyError):\n configerus.new_config(bootstraps=[\"I do not exist\"])",
"def testConfigE(self):\n assert type(self.config['game_width']) == int, \"Not parsing the game width correctly\"",
"def test_snmpset_value_out_of_range_error():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, oid='SNMPv2-MIB::sysName.0',\n value_type='s', value='Thiiiiiiiiiiiiiiiiiiiiiiiiiiiiis '\n 'sssssttttttttrrrriiiiiiiiiiiiiiinnnnnnnnnnnnng is '\n 'wwwwwwaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaayyyyyyyyyy '\n 'tttoooooooooooooooooooooooooooooooooooooooooooooo '\n 'lllooooooooooooooooooooooonnnnnnnnnnnnnnnnnnnggggg'\n ' !!!!!!!!!!!!!!!!!!!!!!!!!!!!', port=SNMP_SRV_PORT)\n assert 'Value out of range' in str(excinfo.value)",
"def test_safe(self):\n\n conf = configuration(\n category(\n 'test',\n Parameter('test', svalue='=open')\n )\n )\n\n configurable = Configurable(conf=conf, autoconf=False)\n\n self.assertRaises(\n Parameter.Error,\n configurable.applyconfiguration,\n targets=configurable, paths='test'\n )",
"def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)",
"def testConfigF(self):\n assert type(self.config['game_height']) == int, \"Not parsing the game width correctly\"",
"def test_creation_hardbounds():\n value = -42\n hardbounds = [-100, 100]\n\n num_a = param.Integer(value=value, hardbounds=hardbounds)\n assert num_a.value == value\n assert num_a.hardbounds == hardbounds",
"def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args",
"def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)",
"def test_invalid_config() -> None:\n config = {\"statsd\": {\"host1\": \"host1\"}}\n\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(None)\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(config)",
"def testOutOfRangeParameterRaisesErrors(self):\n self.c.set_speciation_parameters(speciation_rates=[0.1, 0.2], record_spatial=False, record_fragments=False)\n self.c.c_community.add_protracted_parameters(70.0, 2000.0)\n with self.assertRaises(RuntimeError):\n self.c.apply()\n self.c.set_speciation_parameters(speciation_rates=[0.1, 0.2], record_spatial=False, record_fragments=False)\n self.c.c_community.add_protracted_parameters(50.0, 2100.0)\n with self.assertRaises(RuntimeError):\n self.c.apply()",
"def testOutOfRangeParameterRaisesErrors(self):\n self.c.set_speciation_parameters(speciation_rates=[0.1, 0.2], record_spatial=False, record_fragments=False)\n self.c.c_community.add_protracted_parameters(70.0, 2000.0)\n with self.assertRaises(RuntimeError):\n self.c.apply()\n self.c.set_speciation_parameters(speciation_rates=[0.1, 0.2], record_spatial=False, record_fragments=False)\n self.c.c_community.add_protracted_parameters(50.0, 2100.0)\n with self.assertRaises(RuntimeError):\n self.c.apply()",
"def testOutOfRangeParameterRaisesErrors(self):\n self.c.set_speciation_parameters(speciation_rates=[0.1, 0.2], record_spatial=False, record_fragments=False)\n self.c.c_community.add_protracted_parameters(70.0, 2000.0)\n with self.assertRaises(RuntimeError):\n self.c.apply()\n self.c.set_speciation_parameters(speciation_rates=[0.1, 0.2], record_spatial=False, record_fragments=False)\n self.c.c_community.add_protracted_parameters(50.0, 2100.0)\n with self.assertRaises(RuntimeError):\n self.c.apply()",
"def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})",
"def test_bit_set_bit_index_out_of_range(self):\n value = bytearray()\n value.append(255)\n ops = [bitwise_operations.bit_set(self.test_bin_zeroes, 41, 8, 1, value, None)]\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)"
] | [
"0.73345673",
"0.7262614",
"0.709626",
"0.6822857",
"0.6742535",
"0.67301244",
"0.66877306",
"0.6641707",
"0.65475726",
"0.6366669",
"0.63600886",
"0.6332785",
"0.63235784",
"0.62723964",
"0.62111956",
"0.61827594",
"0.61725974",
"0.6138109",
"0.6133746",
"0.6107105",
"0.61033773",
"0.6100039",
"0.60960484",
"0.6082596",
"0.6055031",
"0.60161304",
"0.60161304",
"0.60161304",
"0.6012258",
"0.60070264"
] | 0.8452791 | 0 |
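Both out-of-bounds payloads in this row come from an `expected_default_datastore_configs()` fixture that the row does not show. The sketch below illustrates one plausible shape for that mapping for a MySQL-like datastore; the parameter names and numeric limits are assumptions, chosen only so that the `out_of_bounds_*` entries fall outside a typical allowed range.

```python
# Illustrative shape for expected_default_datastore_configs() as used above.
# Parameter names and limits are assumptions for a MySQL-like datastore;
# the real fixture defines whatever its validation rules allow.
def expected_default_datastore_configs():
    return {
        "valid_values": {
            "connect_timeout": 120,
            "local_infile": 0,
        },
        # Values chosen to sit just outside the allowed range, so the API
        # should answer with HTTP 422.
        "out_of_bounds_over": {
            "connect_timeout": 31_536_001,
        },
        "out_of_bounds_under": {
            "connect_timeout": 0,
        },
    }
```

The test then serializes the relevant entry with `json.dumps(...)` before handing it to `configurations.create`, exactly as the document shows.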
Test assigning a configuration to a valid instance. | def test_assign_configuration_to_valid_instance(self):
print("instance_info.id: %s" % instance_info.id)
print("configuration_info: %s" % configuration_info)
print("configuration_info.id: %s" % configuration_info.id)
config_id = configuration_info.id
instance_info.dbaas.instances.modify(instance_info.id,
configuration=config_id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')",
"def test_assign_configuration_to_instance_with_config(self):\n config_id = configuration_info.id\n assert_raises(exceptions.BadRequest,\n instance_info.dbaas.instances.modify, instance_info.id,\n configuration=config_id)",
"def test_config_class():\n assert config is not None",
"def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)",
"def test_start_instance_with_configuration(self):\n global configuration_instance\n databases = []\n databases.append({\"name\": \"firstdbconfig\", \"character_set\": \"latin2\",\n \"collate\": \"latin2_general_ci\"})\n databases.append({\"name\": \"db2\"})\n configuration_instance.databases = databases\n users = []\n users.append({\"name\": \"liteconf\", \"password\": \"liteconfpass\",\n \"databases\": [{\"name\": \"firstdbconfig\"}]})\n configuration_instance.users = users\n configuration_instance.name = \"TEST_\" + str(uuid.uuid4()) + \"_config\"\n flavor_href = instance_info.dbaas_flavor_href\n configuration_instance.dbaas_flavor_href = flavor_href\n configuration_instance.volume = instance_info.volume\n configuration_instance.dbaas_datastore = instance_info.dbaas_datastore\n configuration_instance.dbaas_datastore_version = \\\n instance_info.dbaas_datastore_version\n configuration_instance.nics = instance_info.nics\n\n result = instance_info.dbaas.instances.create(\n configuration_instance.name,\n configuration_instance.dbaas_flavor_href,\n configuration_instance.volume,\n configuration_instance.databases,\n configuration_instance.users,\n nics=configuration_instance.nics,\n availability_zone=\"nova\",\n datastore=configuration_instance.dbaas_datastore,\n datastore_version=configuration_instance.dbaas_datastore_version,\n configuration=configuration_href)\n assert_equal(200, instance_info.dbaas.last_http_code)\n assert_equal(\"BUILD\", result.status)\n configuration_instance.id = result.id",
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_configuration(self):\n self.assertEqual(self.Test.adapter_config['write'],\n { 'adapter': TestAdapter, 'foo': 'bar' })",
"def test_configuration():\n config = Configuration()\n\n assert config.relay_pin is not None\n assert config.relay_pin >= 1\n assert config.relay_pin < 32\n assert config.seconds_between_checks > 0\n assert config.seconds_to_power_off > 0\n assert config.seconds_to_wait_after_power_on > config.seconds_to_power_off",
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()",
"def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)",
"def test_object(self):\n\n configurable = Configurable(\n conf=configuration(category('', Parameter('test', value=True)))\n )\n\n class Test(object):\n pass\n\n test = Test()\n\n configurable(test)\n configurable.applyconfiguration(targets=[test])\n\n self.assertTrue(test.test)\n\n test.test = False\n\n applyconfiguration(targets=[test])\n\n self.assertTrue(test.test)",
"def configure_test(self, test, config_json):\n pass",
"def test_call_config(self):\n self.assertTrue(self.Foo._passed)",
"def test_config_object():\n assert isinstance(CFG, Configuration)",
"def test_init(self):\n test_config = TestConfig()\n msg = 'Did not receive a TestConfig object.'\n self.assertIsInstance(test_config, TestConfig, msg)",
"def setUpConfig(self):\n pass",
"def test_set_config_options(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Make sure id is initially set to what we expect\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n # Set and test to a new id\n config.set_config_options(client_id=\"new_id\")\n self.assertEqual(config.read_config_option('client_id'), \"new_id\")",
"def test_set_config__success(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n\n self.assertIsInstance(\n project_config_manager.optimizely_config,\n optimizely_config.OptimizelyConfig\n )",
"def test_applyconfiguration(self):\n\n conf = configuration(category('', Parameter('test', value=True)))\n\n @Configurable(conf=conf)\n class Test(object):\n pass\n\n test = Test()\n\n self.assertTrue(test.test)\n\n test.test = False\n\n applyconfiguration(targets=[test])\n\n self.assertTrue(test.test)\n\n class Test(object):\n pass\n\n test = Test()\n\n self.assertFalse(hasattr(test, 'test'))\n\n applyconfiguration(targets=[test], conf=conf)\n\n self.assertTrue(test.test)",
"def test_instance():\n AgentCheck()\n # rely on default\n check = AgentCheck()\n assert check.init_config == {}\n assert check.instances == []\n\n # pass dict for 'init_config', a list for 'instances'\n init_config = {'foo': 'bar'}\n instances = [{'bar': 'baz'}]\n check = AgentCheck(init_config=init_config, instances=instances)\n assert check.init_config == {'foo': 'bar'}\n assert check.instances == [{'bar': 'baz'}]",
"def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"",
"def test_SpecConfig_class():\n res = SpecConfig(**SPEC_CONFIG)\n assert res.path_out == SPEC_CONFIG['path_out']",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)",
"def test_set_returns_self():\n return_value = config.set('returns_self', True)\n assert return_value is config",
"def test_set_and_deploy_configuration(self, clean_mongo, test_case):\n self.logger.info(\"RUN: %s\", test_case[\"name\"])\n\n uuidv4 = str(uuid.uuid4())\n tenant, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant = create_org(tenant, username, password, \"enterprise\")\n\n update_tenant(tenant.id, addons=[\"configure\"])\n login(tenant.users[0], test_case[\"use_personal_access_token\"])\n\n test_case[\"user\"][\"name\"] = test_case[\"user\"][\"name\"].replace(\"UUID\", uuidv4)\n if test_case[\"roles\"]:\n create_roles(tenant.users[0].token, test_case[\"roles\"])\n test_user = create_user(tid=tenant.id, **test_case[\"user\"])\n login(test_user, test_case[\"use_personal_access_token\"])\n\n # Initialize tenant's devices\n grouped_devices = setup_tenant_devices(tenant, test_case[\"device_groups\"])\n\n deviceconf_MGMT = ApiClient(deviceconfig.URL_MGMT)\n\n device_id = grouped_devices[test_case[\"deploy_group\"]][0].id\n\n # Attempt to set configuration\n rsp = deviceconf_MGMT.with_auth(test_user.token).call(\n \"PUT\",\n deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id),\n body={\"foo\": \"bar\"},\n )\n assert rsp.status_code == test_case[\"set_configuration_status_code\"], rsp.text\n\n # Attempt to deploy the configuration\n rsp = deviceconf_MGMT.with_auth(test_user.token).call(\n \"POST\",\n deviceconfig.URL_MGMT_DEVICE_CONFIGURATION_DEPLOY.format(id=device_id),\n body={\"retries\": 0},\n )\n assert (\n rsp.status_code == test_case[\"deploy_configuration_status_code\"]\n ), rsp.text\n self.logger.info(\"PASS: %s\" % test_case[\"name\"])",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_assign_configuration_to_invalid_instance(self):\n invalid_id = \"invalid-inst-id\"\n try:\n instance_info.dbaas.instances.modify(invalid_id,\n configuration_info.id)\n except exceptions.NotFound:\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 404)",
"def store(self, config_instance):\r\n pass",
"def test_configure(self):\r\n params = {\r\n 'test_str': 'This is only a test',\r\n 'test_empty': '',\r\n 'test_int': 12345,\r\n 'test_float': 123.45,\r\n 'test_dict': { 'test_key': 'test_val' },\r\n 'test_empty_dict': {},\r\n 'test_unicode': u'\\u2603 the snowman',\r\n 'test_none': None,\r\n 'test_boolean': False\r\n }\r\n\r\n for key, val in params.iteritems():\r\n\r\n # JSON-encode each parameter\r\n post_params = {key: json.dumps(val)}\r\n response = requests.put(self.url, data=post_params)\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # Check that the expected values were set in the configuration\r\n for key, val in params.iteritems():\r\n self.assertEqual(self.server.config.get(key), val)",
"def set_configuration(self, configuration):\n self.configuration = configuration"
] | [
"0.73201257",
"0.722626",
"0.71452546",
"0.71109563",
"0.7043339",
"0.69083005",
"0.68408525",
"0.6799425",
"0.6775065",
"0.6765029",
"0.6743073",
"0.6678476",
"0.66665244",
"0.6651853",
"0.66037625",
"0.66030836",
"0.6543711",
"0.6542435",
"0.64977694",
"0.64900345",
"0.64890146",
"0.64448094",
"0.6441722",
"0.6432094",
"0.6429473",
"0.64148724",
"0.63894594",
"0.63847536",
"0.6316457",
"0.6306517"
] | 0.774803 | 0 |
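Assigning a configuration is asynchronous — the API acknowledges with 202 — so the related tests among this row's negatives wait for the instance to settle with a `poll_until` helper. Below is a generic sketch of such a helper; the signature, sleep interval, and timeout are assumptions rather than the framework's actual implementation.

```python
# Generic polling helper in the spirit of the poll_until calls seen in the
# negatives above. Signature and defaults are assumptions.
import time


def poll_until(predicate, sleep_time=2, time_out=120):
    """Call predicate() until it returns truthy or time_out seconds pass."""
    deadline = time.time() + time_out
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(sleep_time)
    raise TimeoutError(f"condition not met within {time_out} seconds")
```

A caller would wait for an instance to report an active status with something like `poll_until(lambda: client.instances.get(inst_id).status == "ACTIVE")`, where `client` and `inst_id` are placeholders.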
Test that assigning a configuration to an instance that already has one conflicts. | def test_assign_configuration_to_instance_with_config(self):
config_id = configuration_info.id
assert_raises(exceptions.BadRequest,
instance_info.dbaas.instances.modify, instance_info.id,
configuration=config_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def test_assign_configuration_to_invalid_instance(self):\n invalid_id = \"invalid-inst-id\"\n try:\n instance_info.dbaas.instances.modify(invalid_id,\n configuration_info.id)\n except exceptions.NotFound:\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 404)",
"def test_unassign_configuration_from_instances(self):\n instance_info.dbaas.instances.update(configuration_instance.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n instance_info.dbaas.instances.update(instance_info.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n instance_info.dbaas.instances.get(instance_info.id)\n\n def result_has_no_configuration():\n instance = instance_info.dbaas.instances.get(inst_info.id)\n if hasattr(instance, 'configuration'):\n return False\n else:\n return True\n\n inst_info = instance_info\n poll_until(result_has_no_configuration)\n inst_info = configuration_instance\n poll_until(result_has_no_configuration)\n\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)",
"def test_update_reg_ex_config(self):\n pass",
"def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_unassign_configuration_after_patch(self):\n instance_info.dbaas.instances.update(instance_info.id,\n remove_configuration=True)\n assert_equal(202, instance_info.dbaas.last_http_code)\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)\n # restart to be sure configuration has been unassigned\n instance_info.dbaas.instances.restart(instance_info.id)\n assert_equal(202, instance_info.dbaas.last_http_code)\n sleep(2)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n\n poll_until(result_is_active)\n result = instance_info.dbaas.configurations.get(configuration_info.id)\n assert_equal(result.instance_count, 0)",
"def test_start_instance_with_configuration(self):\n global configuration_instance\n databases = []\n databases.append({\"name\": \"firstdbconfig\", \"character_set\": \"latin2\",\n \"collate\": \"latin2_general_ci\"})\n databases.append({\"name\": \"db2\"})\n configuration_instance.databases = databases\n users = []\n users.append({\"name\": \"liteconf\", \"password\": \"liteconfpass\",\n \"databases\": [{\"name\": \"firstdbconfig\"}]})\n configuration_instance.users = users\n configuration_instance.name = \"TEST_\" + str(uuid.uuid4()) + \"_config\"\n flavor_href = instance_info.dbaas_flavor_href\n configuration_instance.dbaas_flavor_href = flavor_href\n configuration_instance.volume = instance_info.volume\n configuration_instance.dbaas_datastore = instance_info.dbaas_datastore\n configuration_instance.dbaas_datastore_version = \\\n instance_info.dbaas_datastore_version\n configuration_instance.nics = instance_info.nics\n\n result = instance_info.dbaas.instances.create(\n configuration_instance.name,\n configuration_instance.dbaas_flavor_href,\n configuration_instance.volume,\n configuration_instance.databases,\n configuration_instance.users,\n nics=configuration_instance.nics,\n availability_zone=\"nova\",\n datastore=configuration_instance.dbaas_datastore,\n datastore_version=configuration_instance.dbaas_datastore_version,\n configuration=configuration_href)\n assert_equal(200, instance_info.dbaas.last_http_code)\n assert_equal(\"BUILD\", result.status)\n configuration_instance.id = result.id",
"def _check_config(self):",
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')",
"def test_set_config__twice__with_diff_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('1', project_config_manager.optimizely_config.revision)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n\n # Call set config again\n other_datafile = json.dumps(self.config_dict_with_multiple_experiments)\n project_config_manager._set_config(other_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: 1. New revision number: 42.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('42', project_config_manager.optimizely_config.revision)",
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()",
"def test_set_config__twice__with_same_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'), \\\n mock.patch('optimizely.optimizely_config.OptimizelyConfigService.get_config') as mock_opt_service:\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual(1, mock_opt_service.call_count)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n mock_opt_service.reset_mock()\n\n # Call set config again and confirm that no new log message denoting config update is there\n project_config_manager._set_config(test_datafile)\n self.assertEqual(0, mock_logger.debug.call_count)\n self.assertEqual(0, mock_notification_center.call_count)\n # Assert that mock_opt_service is not called again.\n self.assertEqual(0, mock_opt_service.call_count)",
"def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)",
"async def test_abort_if_already_setup(hass: HomeAssistant, client_single) -> None:\n MockConfigEntry(\n domain=DOMAIN,\n data={CONF_LATITUDE: CITY_1_LAT, CONF_LONGITUDE: CITY_1_LON},\n unique_id=f\"{CITY_1_LAT}, {CITY_1_LON}\",\n ).add_to_hass(hass)\n\n # Should fail, same CITY same postal code (import)\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_IMPORT},\n data={CONF_CITY: CITY_1_POSTAL},\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n # Should fail, same CITY same postal code (flow)\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER},\n data={CONF_CITY: CITY_1_POSTAL},\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"",
"def test_active_configs(self):\n # one config and one active config\n if self.mod.knobs.n_configs != 1:\n self.mod.knobs.n_configs = 1\n self.assertTrue(len(self.map.active_configs) == 1)\n self.assertTrue(self.map.active_configs[0] == 'config01')\n\n # three configs and one active config\n self.mod.knobs.n_configs = 3\n self.mod.knobs.active_config = 'config02'\n self.assertTrue(len(self.map.active_configs) == 1)\n self.assertTrue(self.map.active_configs[0] == 'config02')",
"def test_config_wrong_config(self):\n test_data_1 = (\"[gnupg_missing]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"[data]\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\")\n test_data_2 = (\"[gnupg]\\n\"\n \"recipients_missing = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"[data]\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data_1)\n config = Config(\"test_config.conf\")\n self.assertRaises(\n ConfigError, config.check, \"gnupg\", [\"recipients\", \"signer\"])\n file(\"test_config.conf\", \"wb\").write(test_data_2)\n config = Config(\"test_config.conf\")\n self.assertRaises(\n ConfigError, config.check, \"gnupg\", [\"recipients\", \"signer\"])\n os.remove(\"test_config.conf\")",
"def test_get_configuration_details_from_instance_validation(self):\n inst = instance_info.dbaas.instances.get(configuration_instance.id)\n configuration_id = inst.configuration['id']\n assert_not_equal(None, configuration_id)\n _test_configuration_is_applied_to_instance(configuration_instance,\n configuration_id)",
"def test_conflicting_actions(self):\n\n err = pyramid_config.ConfigurationError\n self.assertRaises(err, self.factory) # calls the includeme",
"def check_configs(self):\n\n pass",
"def test_conf(self):\n self.TESTED_UNIT = 'ceph-fs/0'\n\n def _get_conf():\n \"\"\"get/parse ceph daemon response into dict for specified configs.\n\n :returns dict: conf options selected from configs\n :rtype: dict\n \"\"\"\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder\n\n @retry(wait=wait_exponential(multiplier=1, min=4, max=10),\n stop=stop_after_attempt(10))\n def _change_conf_check(mds_config):\n \"\"\"Change configs, then assert to ensure config was set.\n\n Doesn't return a value.\n \"\"\"\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))\n\n # ensure defaults are set\n _get_conf()\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)\n\n # change defaults\n mds_config = {'mds-cache-memory-limit': '8589934592',\n 'mds-cache-reservation': '0.10',\n 'mds-health-cache-threshold': '2'}\n _change_conf_check(mds_config)\n\n # Restore config to keep tests idempotent\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)",
"def test_new_config_already_exists(self, context):\n\n context.config_exists.return_value = True\n\n runner = CliRunner()\n result = runner.invoke(cli_node_new_configuration, [\n \"--name\", \"some-name\",\n \"--environment\", \"application\"\n ])\n\n # check that error is produced\n self.assertEqual(result.output[:7], \"[error]\")\n\n # check non-zero exit code\n self.assertEqual(result.exit_code, 1)",
"def check_config_conflicts(config: CfgNode):\n if config.task == \"generation\":\n assert config['train'].teacher_forcing == True, \"You should use teacher forcing to train generation!\"\n \n if config.task == \"generation\":\n if config.dataloader.max_seq_length >= config.generation.max_length:\n logger.warning(\"In generation, your config.generation.max_length is shorter than config.max_seq_length\"\n \"This can lead to unexpected behavior. You should consider increasing ``config.generation.max_length``.\"\n )\n raise RuntimeError",
"def test_safe(self):\n\n conf = configuration(\n category(\n 'test',\n Parameter('test', svalue='=open')\n )\n )\n\n configurable = Configurable(conf=conf, autoconf=False)\n\n self.assertRaises(\n Parameter.Error,\n configurable.applyconfiguration,\n targets=configurable, paths='test'\n )",
"def test_config_class():\n assert config is not None",
"def test_get_configuration_details_from_instance_validation(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n inst = instance_info.dbaas.instances.get(instance_info.id)\n configuration_id = inst.configuration['id']\n print(\"configuration_info: %s\" % configuration_id)\n assert_not_equal(None, configuration_id)\n _test_configuration_is_applied_to_instance(instance_info,\n configuration_id)",
"async def test_abort_if_already_setup(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n CONF_HOST: \"1.1.1.1\",\n CONF_PORT: 123,\n CONF_RESOURCES: [\"battery.voltage\"],\n },\n )\n config_entry.add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n mock_pynut = _get_mock_pynutclient(\n list_vars={\"battery.voltage\": \"voltage\"},\n list_ups={\"ups1\": \"UPS 1\"},\n )\n\n with patch(\n \"homeassistant.components.nut.PyNUTClient\",\n return_value=mock_pynut,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_HOST: \"1.1.1.1\",\n CONF_PORT: 123,\n },\n )\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.ABORT\n assert result2[\"reason\"] == \"already_configured\"",
"def test_connection_duplication():",
"def test_config_changed_non_leader(\n self,\n ) -> NoReturn:\n self.harness.set_leader(is_leader=False)\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)"
] | [
"0.7306956",
"0.6951928",
"0.6661572",
"0.6550587",
"0.6458475",
"0.6450685",
"0.6450343",
"0.6390336",
"0.6361332",
"0.63590527",
"0.6295276",
"0.62598777",
"0.6243222",
"0.6216796",
"0.61950976",
"0.6121061",
"0.61144525",
"0.6114061",
"0.60864514",
"0.60847384",
"0.6062219",
"0.60558295",
"0.6050416",
"0.60456294",
"0.6032673",
"0.6025419",
"0.60220915",
"0.6020869",
"0.60057354",
"0.6000287"
] | 0.7328827 | 0 |
test that a new instance will apply the configuration on create | def test_start_instance_with_configuration(self):
global configuration_instance
databases = []
databases.append({"name": "firstdbconfig", "character_set": "latin2",
"collate": "latin2_general_ci"})
databases.append({"name": "db2"})
configuration_instance.databases = databases
users = []
users.append({"name": "liteconf", "password": "liteconfpass",
"databases": [{"name": "firstdbconfig"}]})
configuration_instance.users = users
configuration_instance.name = "TEST_" + str(uuid.uuid4()) + "_config"
flavor_href = instance_info.dbaas_flavor_href
configuration_instance.dbaas_flavor_href = flavor_href
configuration_instance.volume = instance_info.volume
configuration_instance.dbaas_datastore = instance_info.dbaas_datastore
configuration_instance.dbaas_datastore_version = \
instance_info.dbaas_datastore_version
configuration_instance.nics = instance_info.nics
result = instance_info.dbaas.instances.create(
configuration_instance.name,
configuration_instance.dbaas_flavor_href,
configuration_instance.volume,
configuration_instance.databases,
configuration_instance.users,
nics=configuration_instance.nics,
availability_zone="nova",
datastore=configuration_instance.dbaas_datastore,
datastore_version=configuration_instance.dbaas_datastore_version,
configuration=configuration_href)
assert_equal(200, instance_info.dbaas.last_http_code)
assert_equal("BUILD", result.status)
configuration_instance.id = result.id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()",
"def test_create(self):\n pass",
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def test_create_instance(self):\n engine = Engine(self.config_file, self.api_token)\n\n assert isinstance(engine, Engine) is True\n assert isinstance(engine.backend, Backend) is True\n assert isinstance(engine.backend, BossBackend) is True\n assert isinstance(engine.validator, Validator) is True\n assert isinstance(engine.validator, BossValidatorV02) is True\n assert isinstance(engine.config, Configuration) is True\n\n # Schema loaded\n assert isinstance(engine.config.schema, dict) is True\n assert engine.config.schema[\"type\"] == \"object\"",
"def test_create_config_with_save(self) -> None:\n config = self.integration.create_config(name='Config 1', save=True)\n self.assertFalse(config.enabled)\n self.assertIsNotNone(config.pk)",
"def test_cant_call_after_creation(self):\n self.assertTrue(not hasattr(self.Foo, '_config'))",
"def test_create(self):\n self.app\n pass",
"def initCreate(self , initialconfig):\n return",
"def test_create_run(self):\n pass",
"def create(self):\n ...",
"def test_assign_configuration_to_instance_with_config(self):\n config_id = configuration_info.id\n assert_raises(exceptions.BadRequest,\n instance_info.dbaas.instances.modify, instance_info.id,\n configuration=config_id)",
"def test_valid_configurations_create(self):\n expected_configs = self.expected_default_datastore_configs()\n values = json.dumps(expected_configs.get('valid_values'))\n expected_values = json.loads(values)\n result = instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC,\n datastore=instance_info.dbaas_datastore,\n datastore_version=instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 200)\n with TypeCheck('Configuration', result) as configuration:\n configuration.has_field('name', str)\n configuration.has_field('description', str)\n configuration.has_field('values', dict)\n configuration.has_field('datastore_name', str)\n configuration.has_field('datastore_version_id', str)\n configuration.has_field('datastore_version_name', str)\n global configuration_info\n configuration_info = result\n assert_equal(configuration_info.name, CONFIG_NAME)\n assert_equal(configuration_info.description, CONFIG_DESC)\n assert_equal(configuration_info.values, expected_values)",
"def test_create(self):\n from supvisors.statistics import StatisticsInstance\n instance = StatisticsInstance(17, 10)\n # check attributes\n self.assertEqual(3, instance.period)\n self.assertEqual(10, instance.depth)\n self.assertEqual(-1, instance.counter)\n self.assertIsNone(instance.ref_stats)\n self.assertIs(list, type(instance.cpu))\n self.assertFalse(instance.cpu)\n self.assertIs(list, type(instance.mem))\n self.assertFalse(instance.mem)\n self.assertIs(dict, type(instance.io))\n self.assertFalse(instance.io)\n self.assertIs(dict, type(instance.proc))\n self.assertFalse(instance.proc)",
"def test_instance_created(self):\n\n base_model = BaseModel()\n self.assertIsInstance(base_model, BaseModel)\n self.assertTrue(hasattr(base_model, \"created_at\"))\n self.assertTrue(hasattr(base_model, \"updated_at\"))",
"def create(self):\n pass",
"def create(self):\n pass",
"def create(self):\n pass",
"def test_client_create(self):\n pass",
"def test_construct_1_naked(self):\n config = configerus.new_config(bootstraps=[])\n self.assertIsInstance(config, Config)",
"def test_instance(self):\n self.assertIsInstance(self.new_project, Project)",
"def create(self):\n\n pass",
"def test_create_from_config(self) -> None:\n self.assertEqual(self.flag.name, 'test')\n self.assertEqual(self.flag.flag_type, FlagType.INT)\n self.assertEqual(self.flag.default_value, 0)",
"def test_instance():\n AgentCheck()\n # rely on default\n check = AgentCheck()\n assert check.init_config == {}\n assert check.instances == []\n\n # pass dict for 'init_config', a list for 'instances'\n init_config = {'foo': 'bar'}\n instances = [{'bar': 'baz'}]\n check = AgentCheck(init_config=init_config, instances=instances)\n assert check.init_config == {'foo': 'bar'}\n assert check.instances == [{'bar': 'baz'}]",
"def create(cls,configuration):\n raise NotImplementedError('Abstract method has not been implemented')",
"def test_new(self):",
"def test_new(self):",
"def test_basic_instance_creation(self):\n first = self.constituencies[0]\n self.assertEqual(first.slug, 'my-place')\n self.assertEqual(first.get_absolute_url(),\n u\"/constituency/%s/\" % first.slug)\n count = 0\n for user in self.users:\n self.assertEqual(user.postcode, USERS[count]['postcode'])\n count += 1",
"def test_create(self):\n retreat = Retreat.objects.create(\n name=\"random_retreat\",\n details=\"This is a description of the retreat.\",\n seats=40,\n address_line1=\"123 random street\",\n postal_code=\"123 456\",\n state_province=\"Random state\",\n country=\"Random country\",\n timezone=\"America/Montreal\",\n price=3,\n start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),\n end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),\n min_day_refund=7,\n min_day_exchange=7,\n refund_rate=100,\n is_active=True,\n accessibility=True,\n form_url=\"example.com\",\n carpool_url='example2.com',\n review_url='example3.com',\n has_shared_rooms=True,\n room_type=Retreat.DOUBLE_OCCUPATION,\n toilet_gendered=True,\n )\n\n self.assertEqual(retreat.__str__(), \"random_retreat\")",
"def test_create(self):\n\n res = self.metadata.create_or_update(data=self.create)\n\n self.assertEqual(res.name, self.entity.name)\n self.assertEqual(res.service.id, self.entity.service.id)\n self.assertEqual(res.owner, None)",
"def test_0_0_create(self):\n\n self.assertTrue(self.b1)"
] | [
"0.7183207",
"0.6918183",
"0.68747693",
"0.67909354",
"0.66707224",
"0.6631046",
"0.6573294",
"0.655102",
"0.64721066",
"0.6381421",
"0.63577807",
"0.63453907",
"0.6345164",
"0.63238394",
"0.6323695",
"0.6323695",
"0.6323695",
"0.6312962",
"0.62646854",
"0.6260534",
"0.6241234",
"0.6208142",
"0.6197847",
"0.6196894",
"0.6171614",
"0.6171614",
"0.61695004",
"0.6151523",
"0.61495566",
"0.6136293"
] | 0.7056933 | 1 |
wait for the instance created with configuration | def test_instance_with_configuration_active(self):
def result_is_active():
instance = instance_info.dbaas.instances.get(
configuration_instance.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("BUILD", instance.status)
return False
poll_until(result_is_active) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?",
"async def _config_async(self):\n pass",
"def test_start_instance_with_configuration(self):\n global configuration_instance\n databases = []\n databases.append({\"name\": \"firstdbconfig\", \"character_set\": \"latin2\",\n \"collate\": \"latin2_general_ci\"})\n databases.append({\"name\": \"db2\"})\n configuration_instance.databases = databases\n users = []\n users.append({\"name\": \"liteconf\", \"password\": \"liteconfpass\",\n \"databases\": [{\"name\": \"firstdbconfig\"}]})\n configuration_instance.users = users\n configuration_instance.name = \"TEST_\" + str(uuid.uuid4()) + \"_config\"\n flavor_href = instance_info.dbaas_flavor_href\n configuration_instance.dbaas_flavor_href = flavor_href\n configuration_instance.volume = instance_info.volume\n configuration_instance.dbaas_datastore = instance_info.dbaas_datastore\n configuration_instance.dbaas_datastore_version = \\\n instance_info.dbaas_datastore_version\n configuration_instance.nics = instance_info.nics\n\n result = instance_info.dbaas.instances.create(\n configuration_instance.name,\n configuration_instance.dbaas_flavor_href,\n configuration_instance.volume,\n configuration_instance.databases,\n configuration_instance.users,\n nics=configuration_instance.nics,\n availability_zone=\"nova\",\n datastore=configuration_instance.dbaas_datastore,\n datastore_version=configuration_instance.dbaas_datastore_version,\n configuration=configuration_href)\n assert_equal(200, instance_info.dbaas.last_http_code)\n assert_equal(\"BUILD\", result.status)\n configuration_instance.id = result.id",
"def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()",
"def wait_for_ssh(self):\n self.wait_for_status(16)\n printy(\"The instance is now running ...\")\n # The instance is running, but we give it 60 more seconds for running\n # SSHD\n printy(\"Waiting 60 seconds for SSH server to start ...\")\n time.sleep(60)",
"def wait_for_instances(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"wait_for_instances\")",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def create_instance(\n self, base_config: dict, labels: dict, wait_for_operation: bool = True\n ) -> Tuple[dict, str]:\n return",
"def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()",
"def wait(self):\n global driver\n return driver(**self.kwargs).wait(exists=True, timeout=self.time_out)",
"def wait_for_instance_status(config, status):\n client = config.create_api_client()\n InstanceId = config.get('InstanceId')\n while True:\n time.sleep(20)\n req = DescribeInstancesRequest.DescribeInstancesRequest()\n result = do_action(client, req)\n items = result[\"Instances\"][\"Instance\"]\n lookups = {item['InstanceId']: item for item in items}\n if lookups[InstanceId]['Status'] == status:\n return\n else:\n click.echo(\"Instance's current status: {}; transfer to status {} ...\".format(\n lookups[InstanceId]['Status'], status\n ))",
"async def prepare(self):\n pass",
"def initCreate(self , initialconfig):\n return",
"def wait_vm_deployment(self, is_setup: bool, params: dict) -> Tuple[\"Status\", dict]:",
"def create(self):\n return self.start()",
"def _wait_initialized(client, instance_id_list):\n logging.info('Waiting for instances to be initialized.')\n while True:\n res = client.describe_instance_status(InstanceIds=instance_id_list)\n if len(res['InstanceStatuses']) == 0:\n time.sleep(10)\n continue\n if all([ s['InstanceStatus']['Status'] == 'ok' for s in res['InstanceStatuses'] ]):\n logging.info('Instances are initialized now.')\n return\n time.sleep(10)",
"def do_wait(self):\n pass",
"def wait_to_create(name, namespace, timeout):\n return watch.wait_created_cr(name, namespace,\n timeout=timeout, group=GROUP, plural=PLURAL,\n version=VERSION)",
"def started_check(self, timeout=None):\n wait(\n lambda: self.extract_values(),\n timeout or self.cfg.timeout,\n raise_on_timeout=True,\n )",
"def spawn(\n configfile,\n time,\n flavor,\n operating_system,\n region,\n currency,\n sshkey,\n kickstart,\n force):\n if configfile:\n try:\n with open(configfile, 'r') as file:\n data = file.read()\n file.close()\n data = json.loads(data)\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: Config File path entered not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: Config File path entered, Permission Denied.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n else:\n sshkey = file_to_string(sshkey) if sshkey else None\n kickstart = file_to_string(kickstart) if kickstart else None\n data = {\n 'hours_time': time,\n 'flavor': flavor,\n 'operating_system': operating_system,\n 'region': region,\n 'ssh_key': sshkey,\n 'kickstart': kickstart,\n 'currency': currency\n }\n\n validation = False\n\n if not force:\n while not validation:\n print(json.dumps(data, indent=4, sort_keys=True))\n val_question = input('Is theses parameter are correct ? [Y / N] : ')\n if val_question in ['Y', 'y']:\n validation = True\n elif val_question in ['N', 'n']:\n print(Bcolors.FAIL + 'Instance creation/spawning stoped.' + Bcolors.ENDC)\n sys.exit(2)\n\n api_returned_info = api_post('create', data)\n\n if api_returned_info and api_returned_info.get('status') == 200:\n instance_info = api_returned_info.get('Request_instance')\n status = instance_info.get('Status')\n transaction = instance_info.get('Transaction')\n color = Bcolors.OKGREEN if status == 'SUCCESS' else Bcolors.FAIL\n\n print('New Instance requested... ' + color + status + Bcolors.ENDC)\n for message in instance_info.get('Message'):\n print(Bcolors.OKBLUE + message + Bcolors.ENDC)\n\n if transaction and status == 'SUCCESS':\n print(' ')\n print('---------- QR CODE ----------')\n print(convert_to_qrcode(transaction.get('Address')))\n print(Bcolors.WARNING + json.dumps(transaction, indent=4, sort_keys=True) + Bcolors.ENDC)\n print(' ')\n print('You can now look at the transaction and instance status, using the subcommand \"status\" with above \"Transaction_ID\".')\n print('E.G. : \"facelesscloud status -i 13c3febe-ac0a-448f-9404-005b4475063e\" (transaction_id)')\n print(' ')\n return True # For assert test.\n else:\n print(Bcolors.FAIL + 'ERROR : ' + Bcolors.ENDC + 'Something went wrong calling the server.')\n print(json.dumps(api_returned_info, indent=4, sort_keys=True))\n sys.exit(2)\n else:\n print(Bcolors.FAIL + 'ERROR : ' + Bcolors.ENDC + 'Something went wrong calling the server.')\n print(json.dumps(api_returned_info, indent=4, sort_keys=True))\n sys.exit(2)",
"def _wait_for_cassandra_service(self, instance):\n wait_time = 3\n with settings(host_string=instance.public_dns_name, warn_only=True):\n with FULL_HIDE:\n try:\n while not files.exists(\"/var/run/cassandra.pid\", use_sudo=use_sudo()):\n self.logger.debug(\"Sleeping for %d seconds...\" % wait_time)\n time.sleep(wait_time)\n # catch SystemExit because paramiko will call abort when it detects a failure\n # in establishing an SSH connection\n except SystemExit:\n pass",
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def test_create(self):\n assert self.worker.connection is None or self.worker.connection.is_alive()\n # TODO(orlade): Mock this stuff.\n # assert_queue_size({TEST_REQUEST_QUEUE: 0, TEST_RESULT_QUEUE: 0})",
"def wait_for_container(self):\n i = 0\n while True:\n ip_address = self.btcd_container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n if ip_address.startswith(\"172\"):\n self.rpcconn.ipaddress = ip_address\n break\n self.btcd_container.reload()\n time.sleep(0.5)\n i = i + 1\n if i > 20:\n raise Exception(\"Timeout while starting bitcoind-docker-container!\")",
"def test_retrieve_instances_schedule_state(self):\n pass",
"def test_get_configuration_details_from_instance_validation(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n inst = instance_info.dbaas.instances.get(instance_info.id)\n configuration_id = inst.configuration['id']\n print(\"configuration_info: %s\" % configuration_id)\n assert_not_equal(None, configuration_id)\n _test_configuration_is_applied_to_instance(instance_info,\n configuration_id)",
"def spawn(self, instance):\n\n # Update state to inform the nova stack that the VE is launching\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.NOSTATE,\n 'launching')\n LOG.debug('instance %s: is launching' % instance['name'])\n\n # Go through the steps of creating a container\n # TODO(imsplitbit): Need to add conditionals around this stuff to make\n # it more durable during failure. And roll back changes made leading\n # up to the error.\n self._cache_image(instance)\n self._create_vz(instance)\n self._set_vz_os_hint(instance)\n self._configure_vz(instance)\n self._set_name(instance)\n self._add_netif(instance)\n self._add_ip(instance)\n self._set_hostname(instance)\n self._set_nameserver(instance)\n self._start(instance)\n self._initial_secure_host(instance)\n \n # Begin making our looping async call\n timer = utils.LoopingCall(f=None)\n\n # I stole this from the libvirt driver but it is appropriate to\n # have this looping timer call so that if a VE doesn't start right\n # away we can defer all of this.\n def _wait_for_boot():\n try:\n state = self.get_info(instance['name'])['state']\n db.instance_set_state(context.get_admin_context(),\n instance['id'], state)\n if state == power_state.RUNNING:\n LOG.debug('instance %s: booted' % instance['name'])\n timer.stop()\n\n except:\n LOG.exception('instance %s: failed to boot' %\n instance['name'])\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n timer.stop()\n\n timer.f = _wait_for_boot\n return timer.start(interval=0.5, now=True)",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def create_snapshot(self):\n # Don't create if it already exists\n if self.image_available(self.snapshot_name):\n print('Snapshot already exists')\n return\n\n self.spawn()\n\n sleep_len = 10\n # Make sure the network is up\n t = 0\n networks = None\n while not networks:\n try:\n networks = self.instances[0].networks\n except:\n # not ready yet\n pass\n print('Waited {0}s for network to be up'.format(t))\n if not networks:\n time.sleep(sleep_len)\n t += sleep_len\n self.instances[0] = self.nova.servers.get(self.instances[0].id)\n\n # make sure an ip is received that we can ssh to\n # self.instances[0].add_floating_ip('129.16.125.236')\n t = 0\n ip = None\n while not ip:\n networks = self.instances[0].networks\n for key in networks:\n if 'IPv4' in key:\n ips = networks[key]\n for i in ips:\n # change to not if we want a floating ip\n if i.startswith('192'):\n ip = i\n break\n break\n if not ip:\n time.sleep(sleep_len)\n print('Waited {0}s for ip'.format(t))\n t += sleep_len\n self.instances[0] = self.nova.servers.get(self.instances[0].id)\n\n # make sure cloud init finishes\n t = 0\n while not self._exists_remote(ip):\n print('Waited {0}s for cloud-init to finish'.format(t))\n time.sleep(sleep_len*3)\n t += sleep_len*3\n # create snapshot and make sure it gets active\n self.nova.servers.create_image(self.instances[0].id, self.snapshot_name, None)\n snapshot = self.nova.glance.find_image(self.snapshot_name)\n\n # Wait until snap\n t = 0\n status = snapshot.status\n while status != 'active':\n print('Waited {0}s for snapshot. Status is {1}'.format(t, status))\n snapshot = self.nova.glance.find_image(self.snapshot_name)\n status = snapshot.status\n time.sleep(sleep_len*3)\n t += sleep_len*3\n print('Snapshot successfully uploaded. Now terminating worker.')\n # kill created worker\n self.terminate_all()"
] | [
"0.6413858",
"0.6308735",
"0.6107467",
"0.5980934",
"0.59375745",
"0.5925346",
"0.5910224",
"0.5910224",
"0.5906443",
"0.5879241",
"0.58150035",
"0.5814853",
"0.58117807",
"0.5788292",
"0.57844514",
"0.5767152",
"0.5764334",
"0.574734",
"0.57401824",
"0.57239306",
"0.57191616",
"0.56877494",
"0.5675186",
"0.5672318",
"0.56302595",
"0.55969536",
"0.5595847",
"0.5575871",
"0.5561593",
"0.5538583"
] | 0.6520709 | 0 |
test to unassign configuration from instance | def test_unassign_configuration_from_instances(self):
instance_info.dbaas.instances.update(configuration_instance.id,
remove_configuration=True)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.instances.update(instance_info.id,
remove_configuration=True)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.instances.get(instance_info.id)
def result_has_no_configuration():
instance = instance_info.dbaas.instances.get(inst_info.id)
if hasattr(instance, 'configuration'):
return False
else:
return True
inst_info = instance_info
poll_until(result_has_no_configuration)
inst_info = configuration_instance
poll_until(result_has_no_configuration)
instance = instance_info.dbaas.instances.get(instance_info.id)
assert_equal('RESTART_REQUIRED', instance.status) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deconfigure(self):\n\n pass",
"def test_unassign_configuration_after_patch(self):\n instance_info.dbaas.instances.update(instance_info.id,\n remove_configuration=True)\n assert_equal(202, instance_info.dbaas.last_http_code)\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)\n # restart to be sure configuration has been unassigned\n instance_info.dbaas.instances.restart(instance_info.id)\n assert_equal(202, instance_info.dbaas.last_http_code)\n sleep(2)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n\n poll_until(result_is_active)\n result = instance_info.dbaas.configurations.get(configuration_info.id)\n assert_equal(result.instance_count, 0)",
"def tearDown(self):\n # set the config module level variables back to None\n config.config._conf_parser = None\n config.config._user_config_file = None",
"def discard_config(self):\n raise NotImplementedError",
"def unassign_instance(InstanceId=None):\n pass",
"def teardown_method(self, test_method):\n self.wo_obj = None\n self.config_data = None",
"def tearDown(self):\n\n # Remove the config\n del self.config\n\n # Nothing to Teardown\n return super().tearDown()",
"def on_unassign(self):",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deinit(self) -> None:",
"def deconfigure(self):\n\n self.platform.deconfigure()",
"def deinit(self) -> None:\n ...",
"def deinit(self) -> None:\n ...",
"def deinit(self):\n pass",
"def deinit(self):\n pass",
"def pytest_unconfigure(config):\n db = Graph(DEFAULT_DB)\n db.delete_all()",
"def test_unassign_managing_team(self):\n pass",
"def tearDown(self):\n test_utils.delete_test_config()",
"async def test_unload_config_entry(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)",
"def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}",
"def test_component_configuration_removed_from_agent_config(self):\n with cd(self._get_cwd()):\n self.run_cli_command(\n \"add\", \"--local\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)\n )\n self.run_cli_command(\"add\", \"--local\", \"connection\", \"fetchai/http_server\")\n\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.soef.config.api_key\",\n \"some_api_key\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.http_server.config.port\",\n \"9000\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n config = self.load_config()\n assert config.component_configurations\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n in config.component_configurations\n )\n\n self.run_cli_command(\"remove\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID))\n\n config = self.load_config()\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n not in config.component_configurations\n )\n assert config.component_configurations",
"def tearDown(self):\n updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()\n updateConfigurationCmd.name = \"use.external.dns\"\n updateConfigurationCmd.value = \"false\"\n updateConfigurationCmd.scopename = \"zone\"\n updateConfigurationCmd.scopeid = 1\n self.apiClient.updateConfiguration(updateConfigurationCmd)"
] | [
"0.7048801",
"0.6946879",
"0.6614459",
"0.649544",
"0.6400702",
"0.63704515",
"0.6354921",
"0.63240206",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.63082576",
"0.6148849",
"0.6103507",
"0.6103507",
"0.60947424",
"0.60947424",
"0.60795695",
"0.60690594",
"0.59551334",
"0.5940398",
"0.59396803",
"0.5936178",
"0.5908097"
] | 0.7414041 | 0 |
test that after restarting the instance it becomes active | def test_restart_service_should_return_active(self):
instance_info.dbaas.instances.restart(instance_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
def result_is_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("REBOOT", instance.status)
return False
poll_until(result_is_active) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_workflows_restart(self):\n pass",
"def restart(self) -> None:",
"def restart(self):",
"def test_update_instances_schedule_state(self):\n pass",
"def restart(self):\r\n pass",
"def restart(self):\n pass",
"def test_relaunch_deployment_run(self):\n pass",
"def _restart(self):\n pass",
"def is_restarting(self) -> bool:\r\n return False",
"def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()",
"def test_run_started(self):",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def restart():\n stop()\n start()",
"def acquire_restart(self):\n self.bus.write('ACQ:STATE RUN')",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def started(self):",
"def test_901_no_restart_on_config_change_when_paused(self):\n u.log.info('Checking that system services do not get restarted '\n 'when charm config changes but unit is paused...')\n sentry = self.swift_proxy_sentry\n juju_service = 'swift-proxy'\n\n # Expected default and alternate values\n set_default = {'node-timeout': '60'}\n set_alternate = {'node-timeout': '90'}\n\n services = ['swift-proxy', 'haproxy', 'apache2', 'memcached']\n\n # Pause the unit\n u.log.debug('Pausing the unit...')\n pause_action_id = u.run_action(sentry, \"pause\")\n assert u.wait_on_action(pause_action_id), \"Pause action failed.\"\n # Make config change, check for service restarts\n u.log.debug('Making config change on {}...'.format(juju_service))\n self.d.configure(juju_service, set_alternate)\n\n for service in services:\n u.log.debug(\"Checking that service didn't start while \"\n \"paused: {}\".format(service))\n # No explicit assert because get_process_id_list will do it for us\n u.get_process_id_list(\n sentry, service, expect_success=False)\n\n self.d.configure(juju_service, set_default)\n resume_action_id = u.run_action(sentry, \"resume\")\n assert u.wait_on_action(resume_action_id), \"Resume action failed.\"",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def test_restart(self):\n\n first_session_id = self._open_session()\n\n self.restart(\"iml-http-agent\")\n\n # If we try to continue our session, it will tell us to terminate\n response = self._get()\n self.assertResponseOk(response)\n forwarded_messages = response.json()[\"messages\"]\n self.assertEqual(len(forwarded_messages), 1)\n self.assertDictEqual(\n forwarded_messages[0],\n {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"SESSION_TERMINATE_ALL\",\n \"plugin\": None,\n \"session_seq\": None,\n \"session_id\": None,\n \"body\": None,\n },\n )\n\n # And we can open a new session which will get a new ID\n second_session_id = self._open_session(expect_initial=False)\n self.assertNotEqual(first_session_id, second_session_id)",
"def running(self):\n pass",
"def test_terminate_run(self):\n pass",
"def test_heartbeat(self):\n pass",
"def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')",
"def restart(self):\n self.__init__()\n return",
"def at_server_reload(self):\n self.db.started = True",
"def start(self):\n self.active = True",
"def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)",
"def restart_salt():\n stop_salt()\n start_salt()",
"def restart(self):\n\t\treturn self.reset().start()",
"def is_alive(self):"
] | [
"0.7256215",
"0.696777",
"0.69622266",
"0.68948066",
"0.6872024",
"0.68592834",
"0.6850953",
"0.67642987",
"0.66722554",
"0.6623265",
"0.6588452",
"0.64710295",
"0.6453188",
"0.64142096",
"0.63846517",
"0.6372318",
"0.6364346",
"0.6354914",
"0.6351463",
"0.635094",
"0.63443327",
"0.6327162",
"0.6317734",
"0.62779385",
"0.62747496",
"0.6253057",
"0.6247144",
"0.62332165",
"0.62320834",
"0.6224762"
] | 0.7464111 | 0 |
Return a given noise sim. Provide either 'psa' or all of 'season', 'pa', and 'patch' . Will return a stack of enmaps with shape [n_freqs, 3, Ny, Nx], where the second element has the elements (T, Q, U). n_freqs will be 1 for pa1 and pa2. | def getActpolSim(iterationNum = 0, patch = 'deep5',
season = 's13', \
array = 'pa1', \
psa = None,\
noiseDictFile = 'templateInputsMr3c.dict', \
noiseDictFilePath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../inputParams/'), \
signalDictFile = 'signal.dict',\
signalDictFilePath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../inputParams/'),\
verbose = True,\
simType = 'noise',
cmbSet = 0,
doBeam = True, applyWindow = True, noiseDiagsOnly = False, cmbMaptype = 'LensedCMB', splitWanted = None,
applyModulation = True):
#update the last one to True if possible
#FIXME: get rid of absolute pathnames
nDict = flipperDict.flipperDict()
nDict.read_from_file(noiseDictFilePath + '/' + noiseDictFile)
sDict = flipperDict.flipperDict()
sDict.read_from_file(signalDictFilePath + '/' + signalDictFile)
if psa == None: #psa stands for patch, season,
psa = '%s_%s_%s' %(patch, season, array)
#Figure out what frequencies correspond to this array, using the function defined above.
psaFreqs = freqsInPsas(psa, nDict['freqsInArrays'])
#unroll psa names (normally stored as a nested list of lists)
psaList = [item for sublist in nDict['psaList'] for item in sublist]
if psa not in psaList:
raise ValueError('psa %s not found in psaList; options are ' % (psa ), psaList)
noiseSeed = (cmbSet, psaList.index(psa), noiseSeedInd * 4 + (0 if splitWanted is None else splitWanted), iterationNum)
#load up one sample map, just to get the shape and wcs info. Do this for "I" at one frequency
sampleMap = enmap.read_map(os.path.join(os.path.dirname(os.path.abspath(__file__)))+"/"+nDict['dataMapDir'] + 'totalWeightMap' \
+ 'I' + '_' + psa + '_' + psaFreqs[0] + '_fromenlib.fits')
#Note! Foreground seed is the same for every sky patch, season, and frequency!
#This is because they are used to generate fullsky alm's
foregroundSeed = (cmbSet, 0, fgSeedInd, iterationNum)
if simType == 'noise':
return getActpolNoiseSim(noiseSeed = noiseSeed, \
psa = psa, \
noisePsdDir = os.path.dirname(os.path.abspath(__file__))+"/"+nDict['dataMapDir'],
freqs = psaFreqs,
verbose = verbose,
noiseDiagsOnly = noiseDiagsOnly,
splitWanted = splitWanted)
elif simType == 'cmb' or simType == 'foregrounds':
return getActpolCmbFgSim(beamfileDict = sDict['beamNames'],
shape = sampleMap.shape, wcs = sampleMap.wcs,
iterationNum = iterationNum,
cmbDir = os.path.dirname(os.path.abspath(__file__))+"/"+sDict['cmbDir'],
freqs = psaFreqs,
psa = psa,
cmbSet = cmbSet,
doBeam = doBeam, applyWindow = applyWindow,
verbose = verbose, cmbMaptype = cmbMaptype, foregroundSeed = foregroundSeed,
simType = simType, foregroundPowerFile = sDict['foregroundPowerFile'],
applyModulation = applyModulation)
else:
raise ValueError("bad input") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_many_fits(spectrum,rms,guesses,nruns):\n tk_fit = []\n tex_fit = []\n ntot_fit = []\n width_fit = []\n for i in range(nruns):\n noisy_spectrum = add_noise(spectrum,rms)\n noisy_spectrum.specfit(fittype='cold_ammonia',guesses=guesses,fixed=[F,F,F,F,F,T])\n parcopy = copy.deepcopy(noisy_spectrum.specfit.parinfo)\n tk_fit = np.append(tk_fit,parcopy[0].value)\n tex_fit = np.append(tex_fit,parcopy[1].value)\n ntot_fit = np.append(ntot_fit,parcopy[2].value)\n width_fit = np.append(width_fit,parcopy[3].value)\n return tk_fit,tex_fit,ntot_fit,width_fit",
"def get_spike_times_ps(nn, n_ps=1, frac=1.):\n sp = []\n n = 0\n for gr in nn.p_ass_index[n_ps]:\n for nrn in gr[0:frac * len(gr)]:\n for t in nn.mon_spike_e[nrn]:\n sp.append((n, t))\n n += 1\n\n return sp",
"def gen_return_signals(s, ny, nx):\n r = np.tile(s.reshape((len(s),1,1)),(1,ny,nx))\n base_noise = np.random.normal(loc=0.0, scale=1./(ny*nx), size=len(s))\n noise_multi = np.arange(0, ny*nx, 1).reshape(ny,nx)\n noise_cube = base_noise[:,None,None]*noise_multi\n return r+noise_cube",
"def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq",
"def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq",
"def get_spectra(self, map1, map2=None, nl=None, pseudo=False, analytic_errors=False):\n\t\tmap1 = np.asarray(map1)\n\t\tif map2 is None: # Auto\n\t\t\tpcl = hp.anafast(map1 * self.mask, lmax=self.lmax)\n\t\telse: # Cross\n\t\t\tmap2 = np.asarray(map2)\n\t\t\tpcl = hp.anafast(map1 * self.mask, map2=map2 * self.mask, lmax=self.lmax)\n\t\t\n\t\tif analytic_errors: \n\t\t\tpcl_tot = pcl\n\n\t\tif nl is not None:\n\t\t\tif nl.size - 1 < self.lmax:\n\t\t\t\traise ValueError('The noise power spectrum does not have enough l.')\n\n\t\tif self.MASTER:\n\t\t\tif nl is None: \n\t\t\t\tcl = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl))\n\t\t\telse:\n\t\t\t\tif pseudo:\n\t\t\t\t\tcl = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl - nl[:self.lmax+1]))\n\t\t\t\telse:\n\t\t\t\t\tcl = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl)) - self.bin_spectra(nl[:self.lmax+1])\t\t\t\n\t\t\tif analytic_errors and map2 is None:\n\t\t\t\tcl_tot = np.dot(self.K_bb_inv, np.dot(self.P_bl, pcl_tot))\n\t\telse: # f_sky approx\n\t\t\tif nl is None:\n\t\t\t\tcl = np.dot(self.P_bl, pcl/self.weight)/self.fsky\n\t\t\telse:\n\t\t\t\tif pseudo:\n\t\t\t\t\tcl = self.bin_spectra(pcl/self.weight - nl[:self.lmax+1]) / self.fsky\n\t\t\t\telse:\n\t\t\t\t\tcl = self.bin_spectra(pcl/self.weight) / self.fsky - self.bin_spectra(nl[:self.lmax+1])\n\t\t\tif analytic_errors and map2 is None:\n\t\t\t\tcl_tot = self.bin_spectra(pcl_tot/self.weight) / self.fsky\n\n\t\t# Analytic error bars estimation \n\t\t# TODO: moving this into another method?\n\t\tif analytic_errors:\n\t\t\tif map2 is None: # Auto\n\t\t\t\tcl_err = np.sqrt(2./((2. * self.lb + 1) * self.delta_ell * self.fsky)) * cl_tot\n\t\t\telse: # Cross\n\t\t\t\t# Extracting TOTAL pseudo-power spectra\n\t\t\t\tpcl_1 = hp.anafast(map1 * self.mask, lmax=self.lmax)\n\t\t\t\tpcl_2 = hp.anafast(map2 * self.mask, lmax=self.lmax)\n\t\t\t\t\n\t\t\t\tif self.fwhm_smooth is not None:\n\t\t\t\t\tB2_1_ll = np.diag(self.B_1_l**2)\n\t\t\t\t\tB2_2_ll = np.diag(self.B_2_l**2)\n\n\t\t\t\tif self.MASTER:\n\t\t\t\t\tK_ll_1 = self.M_ll\n\t\t\t\t\tK_ll_2 = self.M_ll\n\t\t\t\t\t\n\t\t\t\t\tif self.pixwin:\n\t\t\t\t\t\tK_ll_1 = np.dot(K_ll_1, self.pw2_ll)\n\t\t\t\t\t\tK_ll_2 = np.dot(K_ll_2, self.pw2_ll)\n\t\t\t\t\tif self.fwhm_smooth is not None:\n\t\t\t\t\t\tK_ll_1 = np.dot(K_ll_1, B2_1_ll)\n\t\t\t\t\t\tK_ll_2 = np.dot(K_ll_2, B2_2_ll)\n\n\t\t\t\t\tK_bb_1 = np.dot(np.dot(self.P_bl, K_ll_1), self.Q_lb)\n\t\t\t\t\tK_bb_2 = np.dot(np.dot(self.P_bl, K_ll_2), self.Q_lb)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tK_bb_inv_1 = self.inv_routine(K_bb_1)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"\\t! Problem with Coupling Matrix inversion: let me try a little trick ! \")\n\t\t\t\t\t\tK_bb_inv_1 = self.inv_routine(K_bb_1 + np.eye(K_bb_1.shape[0])*self.eps)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tK_bb_inv_2 = self.inv_routine(K_bb_2)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"\\t! Problem with Coupling Matrix inversion: let me try a little trick ! 
\")\n\t\t\t\t\t\tK_bb_inv_2 = self.inv_routine(K_bb_2 + np.eye(K_bb_2.shape[0])*self.eps)\n\n\t\t\t\t\t# K_bb_inv_1 = self.inv_routine(K_bb_1)\n\t\t\t\t\t# K_bb_inv_2 = self.inv_routine(K_bb_2)\n\n\t\t\t\t\tcl1 = np.dot(K_bb_inv_1, np.dot(self.P_bl, pcl_1))\n\t\t\t\t\tcl2 = np.dot(K_bb_inv_2, np.dot(self.P_bl, pcl_2))\n\n\n\t\t\t\telse:\n\t\t\t\t\tweight_1 = np.ones(self.lmax+1)\n\t\t\t\t\tweight_2 = np.ones(self.lmax+1)\n\n\t\t\t\t\tif self.pixwin:\n\t\t\t\t\t\tweight_1 *= self.pw2_l\n\t\t\t\t\t\tweight_2 *= self.pw2_l\n\t\t\t\t\tif self.fwhm_smooth is not None:\n\t\t\t\t\t\tweight_1 *= np.diag(B2_1_ll)\n\t\t\t\t\t\tweight_2 *= np.diag(B2_2_ll)\n\n\t\t\t\t\tcl1 = np.dot(self.P_bl, pcl_1/weight_1) / self.fsky\n\t\t\t\t\tcl2 = np.dot(self.P_bl, pcl_2/weight_2) / self.fsky\n\n\t\t\t\tcl_err = np.sqrt(2./((2. * self.lb + 1) * self.delta_ell * self.fsky) * (cl**2 + cl1 * cl2))\n\n\t\t\treturn cl, cl_err\n\t\telse:\n\t\t\treturn cl",
"def noisePreset() :\n s.noisePreset()",
"def init_pta(params_all):\n\n ptas = dict.fromkeys(params_all.models)\n for ii, params in params_all.models.items():\n\n allpsr_model = params_all.noise_model_obj(psr=params_all.psrs,\n params=params)\n\n models = list()\n from_par_file = list()\n ecorrexists = np.zeros(len(params_all.psrs))\n\n # Including parameters common for all pulsars\n if params.tm=='default':\n tm = gp_signals.TimingModel()\n elif params.tm=='ridge_regression':\n log10_variance = parameter.Uniform(-20, -10)\n basis = scaled_tm_basis()\n prior = ridge_prior(log10_variance=log10_variance)\n tm = gp_signals.BasisGP(prior, basis, name='ridge')\n\n # Adding common noise terms for all pulsars\n # Only those common signals are added that are listed in the noise model\n # file, getting Enterprise models from the noise model object.\n if 'm_all' in locals():\n del m_all\n for psp, option in params.common_signals.items():\n if 'm_all' in locals():\n m_all += getattr(allpsr_model, psp)(option=option)\n else:\n m_all = tm + getattr(allpsr_model, psp)(option=option)\n\n # Including single pulsar noise models\n for pnum, psr in enumerate(params_all.psrs):\n\n singlepsr_model = params_all.noise_model_obj(psr=psr, params=params)\n\n # Determine if ecorr is mentioned in par file\n try:\n for key,val in psr.t2pulsar.noisemodel.items():\n if key.startswith('ecorr') or key.startswith('ECORR'):\n ecorrexists[pnum]=True\n except Exception as pint_problem:\n print(pint_problem)\n ecorrexists[pnum]=False\n\n # Add noise models\n if psr.name in params.noisemodel.keys():\n noise_model_dict_psr = params.noisemodel[psr.name]\n else:\n noise_model_dict_psr = params.universal\n for psp, option in noise_model_dict_psr.items():\n if 'm_sep' in locals():\n m_sep += getattr(singlepsr_model, psp)(option=option)\n elif 'm_all' in locals():\n m_sep = m_all + getattr(singlepsr_model, psp)(option=option)\n else:\n m_sep = tm + getattr(singlepsr_model, psp)(option=option)\n\n models.append(m_sep(psr))\n del m_sep\n\n pta = signal_base.PTA(models)\n\n if 'noisefiles' in params.__dict__.keys():\n noisedict = get_noise_dict(psrlist=[p.name for p in params_all.psrs],\\\n noisefiles=params.noisefiles)\n print('For constant parameters using noise files in PAL2 format')\n pta.set_default_params(noisedict)\n\n print('Model',ii,'params (',len(pta.param_names),') in order: ', \\\n pta.param_names)\n\n if params.opts is not None:\n if params.opts.mpi_regime != 2:\n np.savetxt(params.output_dir + '/pars.txt', pta.param_names, fmt='%s')\n \n ptas[ii]=pta\n\n return ptas",
"def create_synthetic_noise_dataset(cfg):\n from colorednoise import powerlaw_psd_gaussian\n\n betas = np.linspace(cfg['data.mix_synthetic_noise.min_beta'],\n cfg['data.mix_synthetic_noise.max_beta'],\n num=cfg['data.mix_synthetic_noise.num_samples'])\n sample_rate = cfg['data.sample_rate']\n segment_length = 2 * cfg['data.len_min']\n wavs = [powerlaw_psd_gaussian(beta, sample_rate * segment_length)\n for beta in betas]\n wavs = [audio.normalize(wav, low=-1, high=1) for wav in wavs]\n return NoiseDataset(wavs)",
"def get_sky(plate, mjd, output_path, verbose=False):\n tag = f'PLATE {plate:05d} MJD {mjd:05d} PATH {output_path}'\n if verbose:\n print('Starting {}'.format(tag))\n # Initialize output data.\n last_nexp = None\n plugmaps = []\n wlens = {'b': [], 'r': []}\n wdisps = {'b': [], 'r': []}\n fluxes = {'b': [], 'r': []}\n ivars = {'b': [], 'r': []}\n flats = {'b': [], 'r': []}\n rdnoises = {'b': [], 'r': []}\n masks = {'b': [], 'r': []}\n obskeys = ('EXPOSURE', 'TAI-BEG', 'EXPTIME', 'AZ', 'ALT', 'AIRMASS',\n 'PRESSURE', 'AIRTEMP',\n 'RDNOISE0', 'RDNOISE1', 'RDNOISE2', 'RDNOISE3')\n obsvals = {key: [] for key in obskeys}\n # Size of each amplifier in raw image pixels along (wlen, tracex) axes.\n ampsize = {'b': (2056, 2048), 'r': (2064, 2057)}\n # ampx[band] tabulates whether each wavelength index is readout by\n # amplifier 0/2 (=0) or 1/3 (=1).\n ampx = {'b': 1 * (np.arange(4112) >= 2056),\n 'r': 1 * (np.arange(4128) >= 2064)}\n # amplifer[band] is a function that takes a traceset as input an returns an\n # array that tabulates whether each wavelength index is readout by\n # amplifier 0-3.\n amplifier = {'b': lambda x: 2 * (x >= 2048) + ampx['b'],\n 'r': lambda x: 2 * (x >= 2057) + ampx['r']}\n # Scaling such that RMS = rdnoise_scale * RDNOISEn * neff.\n rdnoise_scale = (4 * np.pi) ** 0.25\n # Conversion from constant log-lambda pixels to wavelength ratio.\n wdisp_const = 1e-4 * np.log(10)\n # Allowed pixel mask bits.\n valid_mask = (1 << 32) - 1\n # Slices of valid data to save. These trim pixels at each end where\n # IVAR=0 or other serious pixel mask bits are often set.\n valid_slices = {'b': slice(767, 3299), 'r': slice(483, 3668) }\n # Initialize data access.\n finder = bossdata.path.Finder()\n mirror = bossdata.remote.Manager()\n # Loop over spectrographs.\n expected_fibers = []\n for specidx in 1, 2:\n # Load the list of science exposures used for this spectrograph's coadd.\n fiber = 500 * (specidx - 1) + 1\n spec_name = finder.get_spec_path(plate, mjd, fiber=fiber, lite=True)\n exposures = bossdata.spec.SpecFile(mirror.get(spec_name)).exposures\n for band in 'b', 'r':\n camera = '{}{}'.format(band, specidx)\n use = valid_slices[band]\n # Loop over science exposures for this camera.\n nexp = exposures.num_by_camera[camera]\n if not (last_nexp is None or nexp == last_nexp):\n print(f'Different nexp for {camera} {tag}')\n return None\n last_nexp = nexp\n for expidx in range(nexp):\n # Load this camera's spFrame file.\n name = exposures.get_exposure_name(expidx, camera, 'spFrame')\n path = mirror.get(finder.get_plate_path(plate, name))\n spFrame = bossdata.plate.FrameFile(path, calibrated=False)\n # Lookup this spectrograph's sky fibers.\n sky_name = binary_type('SKY ', 'ascii')\n fiberidx = np.where(\n spFrame.plug_map['OBJTYPE'] == sky_name)[0]\n if expidx == 0 and band == 'b':\n # Save plugmap metadata.\n plugmaps.append(spFrame.plug_map[\n ['FIBERID','RA','DEC','XFOCAL','YFOCAL']][fiberidx])\n if specidx == 2:\n plugmap = astropy.table.vstack(plugmaps)\n if specidx == 1 and band == 'b':\n # Record observation metadata.\n for key in obskeys:\n try:\n value = spFrame.header[key]\n except KeyError:\n value = -999 # invalid value for int/float types\n obsvals[key].append(value)\n # Load the sky fiber data.\n fibers = spFrame.plug_map['FIBERID'][fiberidx].data\n assert np.all(fiberidx == spFrame.get_fiber_offsets([fibers]))\n if expidx == 0 and band == 'b':\n expected_fibers.append(fibers)\n if verbose:\n print('Found {} sky fibers on spec{}: {}.'.format(\n len(fibers), specidx,\n 
','.join([str(f) for f in fibers])))\n else:\n if not np.all(fibers == expected_fibers[specidx - 1]):\n print('Did not get expected fibers for {} exp {}'\n .format(camera, expidx))\n data = spFrame.get_valid_data(\n fibers, include_sky=True, include_wdisp=True, use_ivar=True,\n pixel_quality_mask=valid_mask)\n if verbose:\n print('Reading {} for exposure {} / {}...'\n .format(camera, expidx + 1, nexp))\n assert data.shape == (len(fibers), 2 * ampsize[band][0])\n mask = spFrame.get_pixel_masks(fibers)\n masks[band].append(mask[:, use])\n # Identify pixels with valid data.\n valid = ~data['ivar'].mask\n bad_fibers = ~np.any(valid, axis=1)\n if verbose and np.any(bad_fibers):\n print(' bad fibers: {}'.format(fibers[bad_fibers]))\n ivar = data['ivar'].data\n assert np.all(ivar[valid] > 0)\n ivars[band].append(ivar[:, use])\n # Load the superflat and trace vectors for sky fibers.\n superflat = spFrame.get_superflat(fibers)\n tracex = spFrame.hdulist[7].read()[fiberidx]\n # Load fiberflat and neff vectors from this camera's spFlat.\n name = exposures.get_exposure_name(expidx, camera, 'spFlat')\n path = mirror.get(finder.get_plate_path(plate, name))\n with fits.open(path) as spFlat:\n fiberflat = spFlat[0].data[fiberidx]\n neff = bossdata.plate.TraceSet(spFlat[3]).get_y()[fiberidx]\n if np.any(neff[valid] <= 0):\n print(f'WARNING: neff <= 0 for {camera} {expidx} {tag}')\n # Lookup the per-amplifier readnoise values.\n readnoises = np.array([\n spFrame.header['RDNOISE{}'.format(amp)]\n for amp in range(4)], dtype=np.float32)\n # Determine which amplifier (0-3) each pixel along the trace is\n # read out by and scale to RMS readnoise per wavelength pixel.\n amp = amplifier[band](tracex)\n rdnoise = rdnoise_scale * readnoises[amp] * neff\n rdnoises[band].append(rdnoise[:, use].astype(np.float32))\n # Combine the superflat and fiberflat.\n flat = superflat * fiberflat\n assert np.all(flat[valid] > 0)\n flats[band].append(flat[:, use])\n # Save wavelength solutions in angstroms.\n wlen = data['wavelength'].data\n wlens[band].append(wlen[:, use])\n # Save wavelength dispersions in angstroms.\n wdisp = data['wdisp'].data\n assert np.all(wdisp[valid] > 0)\n wdisp = wlen * np.expm1(wdisp_const * wdisp)\n wdisps[band].append(wdisp[:, use])\n # Save the combined flat-fielded sky models + residuals,\n # which might be negative due to readnoise.\n flux = data['flux'].data + data['sky'].data\n fluxes[band].append(flux[:, use])\n # Build observation metadata table.\n obslist = astropy.table.Table()\n for key in obskeys:\n obslist[key] = obsvals[key]\n # Build the output HDU list.\n hdus = fits.HDUList()\n cards = dict(PLATE=plate, MJD=mjd, NFIBERS=len(plugmap), NEXP=nexp)\n hdus.append(fits.PrimaryHDU(header=fits.Header(cards)))\n hdus.append(fits.table_to_hdu(obslist))\n hdus[-1].name = 'OBSLIST'\n hdus.append(fits.table_to_hdu(plugmap))\n hdus[-1].name = 'PLUGMAP'\n for band in 'b', 'r':\n Band = band.upper()\n # Combine arrays for each band and save an an image HDU.\n hdus.append(fits.ImageHDU(np.vstack(wlens[band]),\n name='{}WLEN'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(wdisps[band]),\n name='{}WDISP'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(rdnoises[band]),\n name='{}RDNOISE'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(flats[band]),\n name='{}FLAT'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(fluxes[band]),\n name='{}FLUX'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(ivars[band]),\n name='{}IVAR'.format(Band)))\n 
hdus.append(fits.ImageHDU(np.vstack(masks[band]),\n name='{}MASK'.format(Band)))\n name = os.path.join(output_path, 'sky-{}-{}.fits'.format(plate, mjd))\n hdus.writeto(name, overwrite=True)\n print('Completed {}'.format(tag))\n return obslist",
"def simulate_data(self, theory, noise=2.):\n Q, R = theory\n dR = np.asarray(noise)\n offset = 0\n for p in self.probes:\n n = len(p.Q)\n if len(self.dR.shape) > 0:\n noise = dR[offset:offset+n]\n p.simulate_data(theory=(Q[offset:offset+n], R[offset:offset+n]),\n noise=noise)\n offset += n",
"def get_power_spectra(self, n_psbins=50, k_min=None, k_max=None, save=True):\n\n if self.Park19:\n # chunk_z_list_HERA = [27.408, 20.306, 16.0448, 13.204, 11.17485714,\n # 9.653, 8.46933333, 7.5224, 6.74763636, 6.102,\n # 5.55569231, 5.08742857]\n chunk_z_list_HERA = [27.15742, 22.97586, 19.66073, 16.98822, 14.80234,\n 12.99172, 11.4751, 10.19206, 9.09696, 8.15475,\n 7.33818, 6.62582, 6.0006]\n else:\n chunk_z_list_HERA = [27.4, 23.4828, 20.5152, 18.1892, 16.3171, 14.7778, 13.4898, 12.3962,\n 11.4561, 10.6393, 9.92308, 9.28986, 8.72603, 8.22078, 7.76543,\n 7.35294, 6.97753, 6.63441, 6.31959, 6.0297, 5.7619, 5.51376, 5.28319,\n 5.06838]#, 4.86777, 4.68]\n\n if self.lightcones is None:\n self.get_lightcones()\n chunk_indices_HERA = [np.argmin(np.abs(self.lc_redshifts - z_HERA)) for z_HERA in chunk_z_list_HERA][::-1]\n\n if self.vb: print(f' Making powerspectra in {len(chunk_z_list_HERA)} chunks')\n\n self.PS = {}\n use_ETHOS = self.lightcones[0].flag_options.pystruct['USE_ETHOS']\n\n for lc in self.lightcones:\n if use_ETHOS:\n h_PEAK = np.round(lc.astro_params.pystruct['h_PEAK'],1)\n key = f'h_PEAK={h_PEAK:.1f}'\n else:\n key = self.cosmology\n\n theta = lc.astro_params.pystruct[self.param_21cmfast]\n\n if self.param == 'k_PEAK':\n theta = 1./theta**self.k_PEAK_order\n\n if self.param == 'L_X' or 'F' in self.param or self.param == 'M_TURN':\n theta = np.log10(theta) # make L_X, F log10\n\n if key not in self.PS:\n self.PS[key] = {} ##### TODO load PS nicely\n\n if self.vb: print(f' Getting PS for {key}, {self.param}={theta}')\n\n # Make PS\n if k_min is None:\n k_min = self.k_fundamental\n if k_max is None:\n k_max = self.k_max\n\n self.PS_z_HERA, self.PS[key][f'{self.param}={theta}'] = powerspectra_chunks(lc,\n n_psbins=n_psbins,\n chunk_indices=chunk_indices_HERA,\n k_min=k_min,\n k_max=k_max)\n\n del lc\n\n if save:\n np.save(self.PS_file, self.PS, allow_pickle=True)\n if self.vb: print(f' saved PS to {self.PS_file}')\n\n np.save(self.PS_z_HERA_file, self.PS_z_HERA, allow_pickle=True)\n if self.vb: print(f' saved PS_z_HERA to {self.PS_z_HERA_file}')\n\n return",
"def gen_ep_data(self,num_trials=1,trial_len=20,pm_probe_positions=None):\n # insert extra positive trials than expected by chance\n pos_og_bias=np.random.randint(1,100,1)\n # initialize returnables\n ep_len = num_trials*(trial_len+self.nmaps)\n inst_seq = -np.ones([ep_len])\n stim_seq = -np.ones([ep_len,self.sdim])\n action_seq = -np.ones([ep_len])\n\n # loop over trails\n for trial in range(num_trials):\n ## randomize emats\n self.shuffle_pms()\n # generate trial idx_seq\n inst_stim_seq_int,inst_action_seq_int = self.gen_trial_inst_phase()\n resp_stim_seq_int,resp_action_seq_int = self.gen_trial_resp_phase(\n trial_len,pos_og_bias,pm_probe_positions)\n # embed stim idx_seq\n inst_stim_seq = self.emat[inst_stim_seq_int]\n resp_stim_seq = self.emat[resp_stim_seq_int]\n # collect\n t0 = trial*(trial_len+self.nmaps)\n t1 = t0+trial_len+self.nmaps\n inst_seq[t0:t1] = np.concatenate([inst_stim_seq_int,np.zeros(trial_len)],axis=0)\n stim_seq[t0:t1] = np.concatenate([inst_stim_seq,resp_stim_seq],axis=0)\n action_seq[t0:t1] = np.concatenate([inst_action_seq_int,resp_action_seq_int],axis=0)\n inst_seq = tr.LongTensor(inst_seq).unsqueeze(1) # batch dim\n stim_seq = tr.Tensor(stim_seq).unsqueeze(1) \n action_seq = tr.LongTensor(action_seq).unsqueeze(1) \n return inst_seq,stim_seq,action_seq",
"def preprocessing(self, spectra, windownum, peaknum):\n sampleDict = {}\n for spectrum in spectra:\n assert spectrum.getAnnotation() is None\n parentMass = spectrum.getParentMass()\n winSize = parentMass / windownum\n winList = [0]\n mass = 0\n for k in range(windownum+1):\n mass = mass + winSize\n winList.append(mass)\n \n i = 1 \n selecpeaks = []\n peaks = []\n for peak in spectrum.getPeaks():\n mz = peak.getMz()\n while i < len(winList)-1: \n if mz > winList[i]: \n i += 1\n if peaks != []:\n temppeaks = self.sortedPeak(peaks,peaknum)\n selecpeaks.extend(temppeaks)\n peaks = []\n else: \n peaks.append(peak)\n break\n if i >= len(winList) : break \n title = spectrum.getTitle() \n sampleDict[title] = selecpeaks\n# print spectrum.getParentMass()\n# print spectrum.getTitle()\n# break\n return sampleDict",
"def multitaper_psd(signal, fs=1, nw=4, num_tapers=None, peak_resolution=None,\n attach_units=True):\n\n # When the input is AnalogSignal, the data is added after rolling the axis\n # for time index to the last\n data = np.asarray(signal)\n if isinstance(signal, neo.AnalogSignal):\n data = np.moveaxis(data, 0, 1)\n fs = signal.sampling_rate\n\n if isinstance(fs, pq.Quantity):\n fs = fs.rescale('Hz').magnitude\n\n # Add a dim if data has only one dimension\n if data.ndim == 1:\n data = data[np.newaxis, :]\n\n length_signal = np.shape(data)[1]\n\n # If peak resolution is pq.Quantity, get magnitude\n if isinstance(peak_resolution, pq.quantity.Quantity):\n peak_resolution = peak_resolution.rescale('Hz').magnitude\n\n # Determine time-halfbandwidth product from given parameters\n if peak_resolution is not None:\n if peak_resolution <= 0:\n raise ValueError(\"peak_resolution must be positive\")\n nw = length_signal / fs * peak_resolution / 2\n num_tapers = int(np.floor(2*nw) - 1)\n\n if num_tapers is None:\n num_tapers = int(np.floor(2*nw) - 1)\n else:\n if not isinstance(num_tapers, int):\n raise TypeError(\"num_tapers must be integer\")\n if num_tapers <= 0:\n raise ValueError(\"num_tapers must be positive\")\n\n # Generate frequencies of PSD estimate\n freqs = np.fft.rfftfreq(length_signal, d=1/fs)\n\n # Generate Slepian sequences\n slepian_fcts = scipy.signal.windows.dpss(M=length_signal,\n NW=nw,\n Kmax=num_tapers,\n sym=False)\n\n # Calculate approximately independent spectrum estimates\n # Use broadcasting to match dim for point-wise multiplication\n # Shape: (n_channels, n_tapers, n_samples)\n tapered_signal = data[:, np.newaxis, :] * slepian_fcts\n\n # Determine Fourier transform of tapered signal\n spectrum_estimates = np.abs(np.fft.rfft(tapered_signal, axis=-1))**2\n spectrum_estimates[..., 1:] *= 2\n\n # Average Fourier transform windowed signal\n psd = np.mean(spectrum_estimates, axis=-2) / fs\n\n # Attach proper units to return values\n if isinstance(signal, pq.quantity.Quantity) and attach_units:\n psd = psd * signal.units * signal.units / pq.Hz\n freqs = freqs * pq.Hz\n\n return freqs, psd",
"def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range",
"def simulator_from_instrument(instrument):\r\n\r\n grid = grid_from_instrument(instrument=instrument)\r\n psf = psf_from_instrument(instrument=instrument)\r\n\r\n if instrument in \"vro\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=100.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"euclid\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2260.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst_up\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"ao\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=1000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n else:\r\n raise ValueError(\"An invalid instrument was entered - \", instrument)",
"def add_noise(spectra: np.ndarray, maxLevel: float = 0.1, seed: int = 42) -> np.ndarray:\n np.random.seed(seed)\n spectra = spectra.copy()\n spectra[:, 1:] *= (1-maxLevel/2) + np.random.rand(spectra.shape[0], spectra.shape[1]-1) * maxLevel\n return spectra",
"def results_psavg_sims():\n posterior_means = [[1.18040327516, 7.55106444832, 3.27420103073, 3.51998795534, 0.67212630002],\n [0.619197296326, 6.49420626987, 2.22495505139, 2.27682390376, 0.678172183554],\n [0.856628471666, 5.94732402905, 3.97580346111, 3.85788708662, 0.690090617623],\n [0.774906025167, 7.34275742443, 2.69729821931, 2.97994334746, 0.663015258594]]\n\n\n sgr1900_results.results_psavg_sims(posterior_means, [5,6,8,12], \"sgr1806\")\n\n return",
"def get_noise_dict_psr(psrname,noisefiles):\n params = dict()\n with open(noisefiles+psrname+'_noise.json', 'r') as fin:\n params.update(json.load(fin))\n return params",
"def psysPreset(ants=0, band=0, subarray=DEFAULT) :\n antlist = helpers.makeList(ants)\n multiSubarray('psysPreset', subarray, antlist, band)",
"def get_spectra(alm1, alm2=None, spectra=None):\n\n if spectra is None:\n if alm2 is None:\n cls = hp.sphtfunc.alm2cl(alm1)\n else:\n cls = hp.sphtfunc.alm2cl(alm1, alm2)\n l = np.arange(len(cls))\n return l, cls\n \n cls = hp.sphtfunc.alm2cl(alm1, alm2)\n l = np.arange(len(cls[0]))\n \"\"\" spectra_healpix=[TT,EE,BB,TE,EB,TB] \"\"\"\n spectra_healpix = [spectra[0], spectra[5], spectra[8], spectra[1], spectra[6], spectra[2]]\n cl_dict = {spec: cls[i] for i, spec in enumerate(spectra_healpix)}\n\n if alm2 is None:\n #here we set ET=TE, BE=EB and BT=TB\n cl_dict[spectra[3]] = cl_dict[spectra[1]]\n cl_dict[spectra[7]] = cl_dict[spectra[6]]\n cl_dict[spectra[4]] = cl_dict[spectra[2]]\n else:\n #here we need to recompute cls inverting the order of the alm to get ET,BT and BE\n cls = hp.sphtfunc.alm2cl(alm2, alm1)\n # spectra_healpix=[TT,EE,BB,ET,BE,BT]\n spectra_healpix = [spectra[0], spectra[5], spectra[8], spectra[3], spectra[7], spectra[4]]\n for i, spec in enumerate(spectra_healpix):\n cl_dict[spec] = cls[i]\n\n return l, cl_dict",
"def noise_patch(patch,prc=0.2): #X\n npatch = patch.copy().reshape(-1,3)\n height,width = patch.shape[:2]\n nb =int(prc*height*width)\n npatch[np.random.randint(0,height*width,nb),:]=DEAD\n return npatch.reshape(height,width,3)",
"def get_initial_spectra(self, t, E, flavors=Flavor):\n #convert input arguments to 1D arrays\n t = u.Quantity(t, ndmin=1)\n E = u.Quantity(E, ndmin=1)\n #Reshape the Energy array to shape [1,len(E)]\n E = np.expand_dims(E, axis=0)\n\n initialspectra = {}\n\n # Avoid division by zero in energy PDF below.\n E[E==0] = np.finfo(float).eps * E.unit\n\n # Estimate L(t), <E_nu(t)> and alpha(t). Express all energies in erg.\n E = E.to_value('erg')\n\n # Make sure input time uses the same units as the model time grid, or\n # the interpolation will not work correctly.\n t = t.to(self.time.unit)\n\n for flavor in flavors:\n # Use np.interp rather than scipy.interpolate.interp1d because it\n # can handle dimensional units (astropy.Quantity).\n L = get_value(np.interp(t, self.time, self.luminosity[flavor].to('erg/s')))\n Ea = get_value(np.interp(t, self.time, self.meanE[flavor].to('erg')))\n a = np.interp(t, self.time, self.pinch[flavor])\n\n #Reshape the time-related arrays to shape [len(t),1]\n L = np.expand_dims(L, axis=1)\n Ea = np.expand_dims(Ea,axis=1)\n a = np.expand_dims(a, axis=1)\n # For numerical stability, evaluate log PDF and then exponentiate.\n result = \\\n np.exp(np.log(L) - (2+a)*np.log(Ea) + (1+a)*np.log(1+a)\n - loggamma(1+a) + a*np.log(E) - (1+a)*(E/Ea)) / (u.erg * u.s)\n #remove bad values\n result[np.isnan(result)] = 0\n #remove unnecessary dimensions, if E or t was scalar:\n result = np.squeeze(result)\n initialspectra[flavor] = result\n return initialspectra",
"def instruments():\n instr_dict = {}\n #\n instr_dict['LRISr'] = 2**0\n instr_dict['LRISb'] = 2**1\n instr_dict['Kastb'] = 2**2\n instr_dict['shane_kast_red'] = 2**3\n instr_dict['shane_kast_red_ret'] = 2**3\n instr_dict['DEIMOS'] = 2**4\n instr_dict['NIRSPEC'] = 2**5\n instr_dict['GMOS'] = 2**6\n instr_dict['DBSP'] = 2**7\n #\n return instr_dict",
"def sim_dataset(rs, num_encs,M,pos_class_rate = 0.5):\n np.random.seed(seed=rs)\n data = []\n num_timepoints = np.random.randint(30,50, size=num_encs)\n #signal used to modify timeseries of cases:\n channel_vec = np.random.randint(-1,2,M) #vector of values from -1 to 1 of length M \n #Define patient ids and cases & controls \n pat_ids = np.arange(num_encs)\n case_index = int(num_encs*pos_class_rate) \n case_ids = pat_ids[:case_index]\n control_ids = pat_ids[case_index:] \n \n print(f'Simming {num_encs} patients ..') \n #Generate Data for cases and controls\n for i in pat_ids:\n length = num_timepoints[i]\n if i < case_index:\n #generate case\n labels, onset = create_label(length, case=True)\n X = generate_time_series(length, M)\n X = add_signal(X, onset, channel_vec) \n X['SepsisLabel']= labels \n else:\n #generate control\n labels, _ = create_label(length, case=False)\n X = generate_time_series(length, M)\n X['SepsisLabel']= labels\n data.append(X) \n #Shuffle list of patients\n np.random.shuffle(data)\n return data",
"def PCO1S12Noise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/TestOptics_PCO1S12/'\n d1,dx1 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas3.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f13,pow13 = fourier.meanPSD((d1-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f13],[pow12,pow23,pow13])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f13,pow13/f13[0],label='1-3: %.2f' % midfreq[2])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: PCO1S12')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12",
"def generatePPC(self, nChainEntries=500):\n generatedData = []\n generatedNeutronSpectra=[]\n generatedDeuteronSpectra=[]\n totalChainSamples = len(self.chain[-50:,:,0].flatten())\n \n # TODO: this next line could mean we repeat the same sample, i think\n samplesToGet = np.random.randint(0, totalChainSamples, size=nChainEntries)\n for sampleToGet in samplesToGet:\n modelParams = []\n for nParam in range(self.nParams):\n modelParams.append(self.chain[-50:,:,nParam].flatten()[sampleToGet])\n \n \n e0, loc, scale, s = modelParams[:4]\n scaleFactorEntries = modelParams[4:4+self.nRuns]\n returnedData = [self.generateModelData([e0, loc, scale, s, scaleFactor],\n standoff, tofrange, tofbins,\n self.ddnXSinstance, self.stoppingModel.dEdx,\n self.beamTiming, self.nSamplesFromTOF, True) for\n scaleFactor, standoff, tofrange, tofbins\n in zip(scaleFactorEntries, \n self.standoffs[:self.nRuns],\n self.tof_range[:self.nRuns],\n self.tofRunBins[:self.nRuns])]\n # returned data is an array of .. a tuple (modelData, neutronSpectrum, deuteronSpectrum)\n modelData = []\n modelNeutronSpectrum = []\n modelDeuteronSpectrum=[]\n for retDat in returnedData:\n modelData.append(retDat[0])\n modelNeutronSpectrum.append(retDat[1])\n modelDeuteronSpectrum.append(retDat[2])\n generatedData.append(modelData)\n generatedNeutronSpectra.append(modelNeutronSpectrum)\n generatedDeuteronSpectra.append(modelDeuteronSpectrum)\n \n self.tofData = generatedData\n self.neutronSpectra= generatedNeutronSpectra\n self.deuteronSpectra = generatedDeuteronSpectra\n return (generatedData, \n generatedNeutronSpectra, \n generatedDeuteronSpectra)",
"def read_ps(file_name, spectra=None):\n \n data = np.loadtxt(file_name)\n if spectra is None:\n return data[:, 0], data[:, 1]\n\n l = data[:, 0]\n ps = {spec: data[:, i + 1] for i, spec in enumerate(spectra)}\n return l, ps",
"def add_noise(emg):\n MAX_AMPLITUDE = 32767\n\n # Sampling\n # 1 second of data requires 600 frames. And 600 fps is 600 Hz, sampling rate of EMG.\n Ts = 1/EMG_F_SAMPLE\n\n # Time vector\n t = np.arange(0, len(emg)/EMG_F_SAMPLE, Ts) # each unit of t is a second\n\n # Noise\n randAmplitudeScale = np.random.random()*0.1\n randOffset = np.random.random() * 2*np.pi\n \n fNoise = 50; # Frequency [Hz]\n aNoise = randAmplitudeScale*MAX_AMPLITUDE # Amplitude\n noise = aNoise * np.sin(2 * np.pi * t * fNoise + randOffset)\n\n # Add noise to signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] += noise\n return emg"
] | [
"0.5349266",
"0.534863",
"0.5337738",
"0.5226224",
"0.5186345",
"0.5164142",
"0.5131979",
"0.5094437",
"0.50330424",
"0.5022629",
"0.5019019",
"0.49728072",
"0.49429372",
"0.4927111",
"0.49185708",
"0.49174252",
"0.491209",
"0.48873687",
"0.4871158",
"0.486897",
"0.48670015",
"0.4852718",
"0.48351735",
"0.48268402",
"0.48199525",
"0.4819701",
"0.48109677",
"0.48072684",
"0.48047894",
"0.48044285"
] | 0.6391047 | 0 |
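The first entry in the negatives list above (create_synthetic_noise_dataset) builds segments of power-law "colored" noise. As a standalone illustration of that technique, the sketch below uses the colorednoise package's powerlaw_psd_gaussian together with a plain NumPy peak normalisation in place of the audio.normalize helper; the sample rate, segment length, and beta range are arbitrary example values, not taken from the dataset.

import numpy as np
from colorednoise import powerlaw_psd_gaussian  # pip install colorednoise


def make_noise_segments(min_beta=0.0, max_beta=2.0, num_samples=4,
                        sample_rate=16000, segment_seconds=2):
    # beta=0 gives white noise, beta=1 pink, beta=2 brown/red noise.
    betas = np.linspace(min_beta, max_beta, num=num_samples)
    segments = []
    for beta in betas:
        wav = powerlaw_psd_gaussian(beta, sample_rate * segment_seconds)
        # Stand-in for audio.normalize(wav, low=-1, high=1): scale peaks to [-1, 1].
        wav = wav / np.max(np.abs(wav))
        segments.append(wav)
    return segments


if __name__ == "__main__":
    segs = make_noise_segments()
    print(len(segs), segs[0].shape)  # 4 (32000,)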
The relative weight is used to determine how much attention we want to give this VM's data. | def update_relative_weight(self):
self.relative_weight = 1
# Add up all of the historical cpu datapoints (higher CPU = more weight)
for i in self.cpu_datapoints:
self.relative_weight += i
# Multiply by the status value (so VMs with red alarm have most weight)
self.relative_weight *= (self.heartbeat_status * 10) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weight(self):\n return self._base.weight",
"def get_weight(self):\n pass",
"def get_weight(self):\n pass",
"def weight(self):\r\n return self._weight",
"def weight(self) -> float:\r\n return self._weight",
"def weight(self):",
"def weight(self):\n return self._weight",
"def weight(self):\n return self._weight",
"def weight(self):\n return self._weight",
"def getWeight(self):\n return self.weight / (1 + self.numVisits)",
"def getWeight(self) -> float:\n ...",
"def get_weight(self):\n return self.weight",
"def get_weight(self):\n return self.weight",
"def get_weight(self):\n # FIXME: BELUM ADA KEPUTUSAN\n return 0",
"def weight(self) -> int:\n return pulumi.get(self, \"weight\")",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def _get_weight(self):\n return self.__weight",
"def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight",
"def get_weight(self):\n return self.weight # return the weight value",
"def get_weight(self):\r\n weight = self.weight\r\n if weight is None:\r\n weight = 1\r\n return weight",
"def total_weight(self):\n return self.experiences.total_weight",
"def weight(self):\n return self._hx711.get_weight()",
"def weight_multiplier(self):\n return CONF.PF9.vcpu_weight_multiplier",
"def weights(self):\r\n\t\treturn None"
] | [
"0.76758707",
"0.75917065",
"0.75917065",
"0.7509697",
"0.7506455",
"0.74212307",
"0.74037105",
"0.74037105",
"0.74037105",
"0.7399485",
"0.7393575",
"0.7339185",
"0.7339185",
"0.7282254",
"0.72358805",
"0.7191375",
"0.7191375",
"0.7191375",
"0.7191375",
"0.7191375",
"0.7191375",
"0.7191375",
"0.7191375",
"0.7188987",
"0.71710736",
"0.7014391",
"0.6977504",
"0.69186914",
"0.6899119",
"0.68133813"
] | 0.77360976 | 0 |
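To make the weighting scheme in the query/document pair above concrete, here is a minimal, self-contained Python sketch. Only the formula (1 plus the sum of the historical CPU datapoints, scaled by heartbeat_status * 10) comes from the document; the VM class wrapper and the sample values are illustrative assumptions.

class VM:
    def __init__(self, cpu_datapoints, heartbeat_status):
        self.cpu_datapoints = cpu_datapoints      # historical CPU usage samples
        self.heartbeat_status = heartbeat_status  # assumed scale, e.g. 1=green, 2=yellow, 3=red
        self.relative_weight = 1

    def update_relative_weight(self):
        # Higher historical CPU usage contributes more weight.
        self.relative_weight = 1
        for i in self.cpu_datapoints:
            self.relative_weight += i
        # VMs in a worse state (e.g. red alarm) get the most weight.
        self.relative_weight *= (self.heartbeat_status * 10)


if __name__ == "__main__":
    vm = VM(cpu_datapoints=[40, 55, 70], heartbeat_status=3)
    vm.update_relative_weight()
    print(vm.relative_weight)  # (1 + 40 + 55 + 70) * 30 = 4980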
Exchange the authorization code for an access token. | def exchange_token(self, code):
access_token_url = OAUTH_ROOT + '/access_token'
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'redirect_uri': self.redirect_uri,
'code': code,
}
resp = requests.get(access_token_url, params=params)
if not resp.ok:
raise MixcloudOauthError("Could not get access token.")
return resp.json()['access_token'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exchange_code(self, code):\n data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': self.redirect_uri,\n 'scope': 'identify'\n }\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n access_token = self.http_client.post(\n f'{self.api_endpoint}/oauth2/token', headers, data=data)\n return access_token",
"def code_grant_type(self, areq):\n try:\n _tinfo = self.sdb.upgrade_to_token(areq[\"code\"], issue_refresh=True)\n except AccessCodeUsed:\n error = TokenErrorResponse(\n error=\"invalid_grant\", error_description=\"Access grant used\"\n )\n return Unauthorized(error.to_json(), content=\"application/json\")\n\n logger.debug(\"_tinfo: %s\" % sanitize(_tinfo))\n\n atr = AccessTokenResponse(**by_schema(AccessTokenResponse, **_tinfo))\n\n logger.debug(\"AccessTokenResponse: %s\" % sanitize(atr))\n\n return Response(\n atr.to_json(), content=\"application/json\", headers=OAUTH2_NOCACHE_HEADERS\n )",
"def callback():\n code = request.args.get('code')\n result = http.post(token_uri, data = {\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': redirect_uri,\n 'client_id': client_id,\n 'client_secret': client_secret\n })\n data = result.json()\n \n access_token = data['access_token']\n refresh_token = data['refresh_token']\n \n cache.set('access_token', access_token)\n cache.set('refresh_token', refresh_token)\n\n return redirect('/')",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)",
"def exchange_code(self, code):\n params = {'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'authorization_code',\n 'code': code}\n result = self._send_request(EXCHANGE_URL, params=params, method='POST',\n data_field=None)\n self.access_token = result['access_token']\n self.refresh_token = result['refresh_token']\n return self.access_token, self.refresh_token",
"def authorize(self, code):\n if self._authenticator.redirect_uri is None:\n raise InvalidInvocation('redirect URI not provided')\n self._request_token(code=code, grant_type='authorization_code',\n redirect_uri=self._authenticator.redirect_uri)",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)",
"def authorize_access_token(self, request, **kwargs):\n params = self.retrieve_access_token_params(request)\n params.update(kwargs)\n return self.fetch_access_token(**params)",
"def get_access_token(self, code):\n body = {\n 'grant_type': 'authorization_code',\n 'redirect_uri': self.redirect_uri,\n 'code': code,\n 'client_id': self._client_id,\n 'client_secret': self._client_secret\n }\n response = self._request_token(body)\n\n return response",
"def set_access_token(self, auth_code=None):\n\n oauth_params = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'code': auth_code,\n 'redirect_uri': self.redirect_uri\n }\n token_request = req.post(OAUTH_ENDPOINT, data=oauth_params)\n token_response = token_request.json()\n access_token = token_response['access_token']\n self.access_token = access_token",
"def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r",
"def createAccessTokenReplacement(self):\r\n\r\n url = self._config['OAUTH2ENDPOINT']['huddleAuthServer'] + \"request?response_type=code\" + \\\r\n \"&client_id=\" + self._config['OAUTH2']['clientID'] + \\\r\n \"&redirect_uri=\" + self._config['OAUTH2']['redirectUri']\r\n webbrowser.open_new(url)\r\n code = input('Please enter the code from your web browser:')\r\n\r\n response = self._oauth.obtainAccessTokenBy3LeggedOAuth(code)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = Token(responseBody)\r\n except TypeError as e:\r\n print (\"Bad response when requesting a token \" + str(response))\r\n sys.exit()\r\n\r\n return oauthToken",
"def obtainAccessTokenBy3LeggedOAuth(self, auth_code):\r\n header = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}\r\n url = self._config['OAUTH2ENDPOINT']['huddleAccessTokenServer']\r\n\r\n body = {\"grant_type\": \"authorization_code\",\r\n \"client_id\": self._config['OAUTH2']['clientID'],\r\n \"redirect_uri\": self._config['OAUTH2']['redirectUri'],\r\n \"code\": auth_code}\r\n\r\n return self._adapter.postRequest(url, header, parse.urlencode(body))",
"def redeem_code_for_token(self, *args, **kwargs):\n if len(args) == 1:\n kwargs = args[0]\n\n grant_type = kwargs.pop('grant_type', None)\n code = kwargs.pop('code', None)\n elif len(args) == 2:\n grant_type = args[0]\n code = args[1]\n\n redirect_uri = kwargs.pop('redirect_uri', None)\n client_id = kwargs.pop('client_id', None)\n\n if redirect_uri == None:\n redirect_uri = self.get_redirect_uri(client_id)\n\n is_redirect_uri_valid = self.verify_redirect_uri(client_id,\n redirect_uri)\n\n if not is_redirect_uri_valid:\n return self.invalid_request()\n\n if grant_type != 'authorization_code':\n return self.unsupported_grant_type(\n redirect_uri = redirect_uri\n )\n\n is_valid_code = self.verify_auth_code(code)\n\n if not is_valid_code:\n return self.unauthorized_client(\n redirect_uri = redirect_uri\n )\n\n\n access_token = self.generate_access_token()\n refresh_token = self.generate_refresh_token()\n\n self.save_auth_token(access_token, refresh_token)\n\n return {\n 'access_token': access_token\n , 'refresh_token': refresh_token\n , 'token_type': self.token_type\n , 'expires_in': self.token_expires_in\n }",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)",
"def exchange_code(authorization_code):\n\n flow = flow_from_clientsecrets(CLIENTSECRET_LOCATION, ' '.join(SCOPES))\n flow.redirect_uri = REDIRECT_URI\n try:\n print authorization_code\n credentials = flow.step2_exchange(authorization_code)\n return credentials\n except FlowExchangeError, error:\n logging.error('An error occurred: %s', error)\n raise CodeExchangeException(None)",
"def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)",
"def create_access_token(oauth):\n #create parameters for API authorization\n\tredirect_uri = 'oob'\n\tparams = {'client_secret': oauth.client_secret,\n\t\t\t 'redirect_uri': redirect_uri,\n\t\t\t 'response_type': 'code'}\n\t#store the access code\n\turl = oauth.get_authorize_url(**params)\n\n\t#open a web browser to get access token and then store it via manual input\n\twebbrowser.open(url)\n\tcode = input('Enter code: ')\n\t#create credentials item\n\tstart_time = time.time()\n\t#create dictionary to hold credentials and store beginning time\n\tcredentials = {'token_time': start_time}\n\n\t#NEED TO ADD IN 'REFRESH TOKEN' FUNCTION HERE SOMEWHERE\n\t#\n\t\n\t#create parameters\n\tdata = {'code': code,\n\t\t\t'redirect_uri': redirect_uri,\n\t\t\t'grant_type': 'authorization_code'}\n\t#build the headers\n\theaders = oauth_headers(oauth)\n\t#create the raw access token\n\traw_access = oauth.get_raw_access_token(data=data, headers=headers)\n\t#parse the raw access token and add to credentials variable\n\tcredentials.update(access_parse(raw_access))\n\n\t#parse access token from credentials\n\taccess_token = credentials['access_token']\n\t#return access token\n\treturn access_token",
"def verifier(self,code):\n \n client = oauth.Client(self.consumer)\n resp, content = client.request(self.access_token_url, \"POST\")\n if resp['status'] != '200':\n print resp\n raise FBError(\"Invalid response %s.\" % resp['status'])\n access_token = dict(urlparse.parse_qsl(content))\n self._access_token = access_token",
"def step_impl(context, field_name):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n del fields[field_name]\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)",
"def step_impl(context, field_name):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n del fields[field_name]\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)",
"def access_token(self, code, redirect_uri):\r\n params = (base.get_params(None, locals()))\r\n params.update({'client_id': self.client_id,\r\n 'client_secret': self.client_secret,\r\n 'grant_type': 'authorization_code'})\r\n\r\n request = http.Request('POST', self.get_url('token'), params)\r\n\r\n return request, parsers.parse_json",
"def step_impl(context, request_type):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance,\n request_type)",
"def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}",
"def Access(self):\n if datetime.now() < self.access_exp:\n pass\n elif datetime.now() > self.access_exp and datetime.now() < self.refresh_exp:\n grant = 'refresh_token'\n self._postRequest(grant=grant)\n elif datetime.now() > self.refresh_exp:\n grant = 'authorization_code'\n self._getURLcode()\n self._postRequest(grant=grant)",
"def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token",
"def set_access_token(self, value: str) -> None:\n\n self.__requester.set_authorization(value)",
"def oauth2_process_code(self, request, redirect_uri):\n if 'code' in request.GET:\n # We've got a code from an authorisation, so convert it to a access_token\n\n self.oauth2_access_token(request.GET['code'], next=redirect_uri)\n\n request.session['oauth2_token'] = self.oauth2_token\n request.session['oauth2_token_expires'] = self.oauth2_token_expires\n\n return True\n # else: 'error_reason' in request.GET\n \n return False",
"def obtain_access_token(request, consumer_id, consumer_secret, code,\n redirect_uri):\n # NOTE(garcianavalon) right now this method has no use because is a wrapper for a\n # method intented to be use by the client/consumer. For the IdM is much more \n # convenient to simply forward the request, see forward_access_token_request method\n LOG.debug('Exchanging code: {0} by application: {1}'.format(code, consumer_id))\n manager = internal_keystoneclient(request).oauth2.access_tokens\n access_token = manager.create(consumer_id=consumer_id,\n consumer_secret=consumer_secret,\n authorization_code=code,\n redirect_uri=redirect_uri)\n return access_token",
"def step234_exchange_and_tokeninfo_and_userinfo(self, code, http=None):\n\n if http is None:\n http = httplib2.Http()\n\n logging.debug('exchanging code for access token')\n credentials = self.step2_exchange(code, http)\n logging.debug('verifing access token received from the IDP')\n credentials = self.step3_verify_access_token(credentials, http)\n logging.debug('using access token to access user info from the IDP')\n return self.step4_userinfo(credentials, http)"
] | [
"0.7655848",
"0.72651523",
"0.7153681",
"0.71138126",
"0.70811975",
"0.7071909",
"0.7048902",
"0.7033969",
"0.70338416",
"0.69557816",
"0.6936204",
"0.6890346",
"0.6886055",
"0.68474656",
"0.68372333",
"0.6788539",
"0.67677724",
"0.67472637",
"0.67402524",
"0.66617274",
"0.66593075",
"0.6657031",
"0.6644389",
"0.6548801",
"0.6540651",
"0.6539838",
"0.65392375",
"0.6507043",
"0.64405775",
"0.64180714"
] | 0.78054535 | 0 |
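The document above implements the standard OAuth 2.0 authorization-code exchange. The sketch below shows the same flow as a standalone function using the requests library; the OAUTH_ROOT URL and the credential values are placeholders (assumptions, not confirmed by the dataset), and many providers expect a POST with grant_type=authorization_code rather than the GET used here.

import requests

# Assumed base URL for illustration only; the document references an
# OAUTH_ROOT constant without defining it.
OAUTH_ROOT = "https://www.mixcloud.com/oauth"


def exchange_token(client_id, client_secret, redirect_uri, code):
    # Swap the one-time authorization code for an access token.
    params = {
        "client_id": client_id,
        "client_secret": client_secret,
        "redirect_uri": redirect_uri,
        "code": code,
    }
    resp = requests.get(OAUTH_ROOT + "/access_token", params=params)
    resp.raise_for_status()  # sketch: fail loudly instead of raising a custom OAuth error
    return resp.json()["access_token"]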
Optimise & Modify s3xml etree to produce s3ocr etree | def s3ocr_etree(self):
s3xml_etree = self.resource.struct(options=True,
references=True,
stylesheet=None,
as_json=False,
as_tree=True)
# xml tags
ITEXT = "label"
HINT = "comment"
TYPE = "type"
HASOPTIONS = "has_options"
LINES = "lines"
BOXES = "boxes"
# Components Localised Text added to the etree
# Converting s3xml to s3ocr_xml (nicer to traverse)
s3xml_root = s3xml_etree.getroot()
resource_element = s3xml_root.getchildren()[0]
s3ocr_root = etree.Element("s3ocr")
if self.r.component: # if it is a component
component_sequence, components_l10n_dict = \
self.__rheader_tabs_sequence(self.r.tablename)
resource_element.set(ITEXT,
components_l10n_dict.get(None,
self.resource.tablename))
s3ocr_root.append(resource_element)
else: # if it is main resource
componentetrees = []
# mres is main resource etree
mres = etree.Element("resource")
for attr in resource_element.attrib.keys():
mres.set(attr, resource_element.attrib.get(attr))
for field_element in resource_element:
if field_element.tag == "field": # main resource fields
mres.append(field_element)
elif field_element.tag == "resource": # component resource
componentetrees.append(field_element)
# Serialisation of Component List and l10n
component_sequence, components_l10n_dict = \
self.__rheader_tabs_sequence(self.r.tablename)
mres.set(ITEXT, components_l10n_dict.get(None,
self.resource.tablename))
if component_sequence:
serialised_component_etrees = []
for eachcomponent in component_sequence:
component_table = "%s_%s" % (self.prefix, eachcomponent)
for eachtree in componentetrees:
if eachtree.attrib.get("name", None) == component_table:
# l10n strings are added and sequencing is done here
eachtree.set(ITEXT,
components_l10n_dict.get(eachcomponent,
component_table))
serialised_component_etrees.append(eachtree)
else:
serialised_component_etrees = componentetrees
# create s3ocr tree
s3ocr_root.append(mres)
for res in serialised_component_etrees:
s3ocr_root.append(res)
# remove fields which are not required
# loading user defined configurations
FIELD_TYPE_LINES = { # mapping types with number of lines
"string": 2,
"textbox": 4,
"integer": 1,
"double": 1,
"date": 1,
"datetime": 1,
}
FIELD_TYPE_BOXES = { # mapping types with number of boxes
"integer": 9,
"double": 16,
}
for eachresource in s3ocr_root.iterchildren():
resourcetablename = eachresource.attrib.get("name")
if eachresource.attrib.get("name") in self.exclude_component_list:
# excluded components are removed
s3ocr_root.remove(eachresource)
continue
for eachfield in eachresource.iterchildren():
fieldname = eachfield.attrib.get("name")
# fields which have to be displayed
fieldtype = eachfield.attrib.get(TYPE)
# loading ocr specific fieldtypes
ocrfieldtype = self.generic_ocr_field_type.get(fieldtype,
None)
if ocrfieldtype != None:
eachfield.set(TYPE, ocrfieldtype)
# refresh fieldtypes after update
fieldtype = eachfield.attrib.get(TYPE)
# set num boxes and lines
fieldhasoptions = eachfield.attrib.get(HASOPTIONS)
if fieldhasoptions == "False":
eachfield.set(LINES,
str(FIELD_TYPE_LINES.get(fieldtype,
1)))
if fieldtype in FIELD_TYPE_BOXES.keys():
eachfield.set(BOXES,
str(FIELD_TYPE_BOXES.get(fieldtype)))
# if field is readable but not writable set default value
if eachfield.attrib.get("readable", "False") == "True" and \
eachfield.attrib.get("writable", "False") == "False":
try:
fieldresourcename = \
eachresource.attrib.get("name").split("%s_" %\
self.prefix)[1]
except:
fieldresourcename = \
eachresource.attrib.get("name").split("_")[1]
fieldresource = \
self.resource.components.get(fieldresourcename, None)
if not fieldresource:
fieldresource = self.resource
fieldname = eachfield.attrib.get("name")
try:
fielddefault = self.r.resource.table[fieldname].default
except(KeyError):
fielddefault = "None"
eachfield.set("default",
fielddefault)
# load custom fieldtype specific settings
if fieldtype not in self.generic_ocr_field_type.values() \
and fieldtype in self.custom_fieldtype_properties.keys():
self.__update_custom_fieldtype_settings(eachfield)
# refresh fieldtypes after update
fieldtype = eachfield.attrib.get(TYPE)
# for unknown field types
if fieldtype not in self.generic_ocr_field_type.values():
eachfield.set(TYPE, "string")
eachfield.set(HASOPTIONS, "False")
eachfield.set(LINES, "2")
# refresh fieldtypes after update
fieldtype = eachfield.attrib.get(TYPE)
# loading custom field specific settings
self.__update_custom_field_settings(eachfield,
resourcetablename,
fieldname)
# in ocr boolean fields should be shown as options
if fieldtype == "boolean":
eachfield.set(HASOPTIONS, "True")
# fields removed which need not be displayed
if eachfield.attrib.get("readable", "False") == "False" and \
eachfield.attrib.get("writable", "False") == "False":
eachresource.remove(eachfield)
continue
if eachfield.attrib.get(HASOPTIONS, "False") == "True" and \
eachfield.attrib.get(TYPE) != "boolean":
s3ocrselect = eachfield.getchildren()[0]
for eachoption in s3ocrselect.iterchildren():
if eachoption.text == "" or eachoption.text == None:
s3ocrselect.remove(eachoption)
continue
return s3ocr_root | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform_s3_xsl(**kwargs):\n access_id = kwargs.get(\"access_id\")\n access_secret = kwargs.get(\"access_secret\")\n bucket = kwargs.get(\"bucket\")\n dest_prefix = kwargs.get(\"destination_prefix\")\n source_prefix = kwargs.get(\"source_prefix\")\n if kwargs.get(\"dag\"):\n run_id = kwargs.get(\"dag\").dag_id\n else:\n run_id = \"no-dag-provided\"\n\n saxon = prepare_saxon_engine()\n transformed = etree.Element(\"collection\")\n transformed.attrib[\"dag-id\"] = run_id\n transformed.attrib[\"dag-timestamp\"] = kwargs.get(\"timestamp\", \"no-timestamp-provided\")\n xsl = \"https://raw.github.com/{repo}/{branch}/{filename}\".format(\n repo=kwargs.get(\"xsl_repository\", \"tulibraries/aggregator_mdx\"),\n branch=kwargs.get(\"xsl_branch\", \"main\"),\n filename=kwargs.get(\"xsl_filename\")\n )\n\n for s3_key in process.list_s3_content(bucket, access_id, access_secret, source_prefix):\n logging.info(\"Transforming File %s\", s3_key)\n s3_content = process.get_s3_content(bucket, s3_key, access_id, access_secret)\n s3_xml = etree.fromstring(s3_content)\n for record in s3_xml.iterchildren():\n record_id = record.get(\"airflow-record-id\")\n logging.info(\"Transforming Record %s\", record_id)\n result_str = subprocess.check_output([\"java\", \"-jar\", saxon, \"-xsl:\" + xsl, \"-s:-\"], input=etree.tostring(record, encoding=\"utf-8\"))\n result = etree.fromstring(result_str)\n result.attrib[\"airflow-record-id\"] = record_id\n transformed.append(result)\n filename = s3_key.replace(source_prefix, dest_prefix)\n transformed_xml = etree.tostring(transformed, encoding=\"utf-8\")\n process.generate_s3_object(transformed_xml, bucket, filename, access_id, access_secret)",
"def wrez2xml(self,newdoc,newroot):\n\t\twrez = newdoc.createElement('wrez')\n\t\twrez.setAttribute('hasChanged', str(self.hasChanged))\n\t\tnewroot.appendChild(wrez)\n\n\t\tpath = newdoc.createElement('path')\n\t\tpath.setAttribute('value', self.path)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('init_str')\n\t\tpath.setAttribute('value', self.init_str)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('hash_sha512')\n\t\tpath.setAttribute('value', self.hash_sha512)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('src_rip')\n\t\tpath.setAttribute('value', self.src_rip)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('quality')\n\t\tpath.setAttribute('value', self.quality)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('codec')\n\t\tpath.setAttribute('value', self.codec)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('language')\n\t\tpath.setAttribute('value', self.language)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('audio')\n\t\tpath.setAttribute('value', self.audio)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('encoder')\n\t\tpath.setAttribute('value', self.encoder)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('version')\n\t\tpath.setAttribute('value', self.version)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('extension')\n\t\tpath.setAttribute('value', self.extension)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('release_year')\n\t\tpath.setAttribute('value', self.release_year)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('title')\n\t\tpath.setAttribute('value', self.title)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('size')\n\t\tpath.setAttribute('value', str(self.size))\n\t\twrez.appendChild(path)\n\t\treturn wrez",
"def parse_xml(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n #baseInfo['folder'] = tree.find('folder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n if obj.find('score') is None:\r\n obj_struct['score']=\"\"\r\n else:\r\n obj_struct['score'] = obj.find('score').text\r\n if obj.find('region') is None:\r\n obj_struct['region']=\"\"\r\n else:\r\n obj_struct['region'] = obj.find('region').text\r\n if obj.find('imageptr') is None:\r\n obj_struct['imageptr']=\"\"\r\n else:\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n # obj_struct['score'] = obj.find('score').text\r\n # obj_struct['region'] = obj.find('region').text\r\n # obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects",
"def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder",
"def test_s3():\n vc = vtec.parse(EX1)\n assert vc[0].s3() == \"TO.W.130\"",
"def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)",
"def cleanup(self):\n for element in self.root.iter():\n element.tag = element.tag.partition('}')[-1]",
"def _transform(self, document):\n pass",
"def write_and_clean(urn, lang, parsed, citations,target):\n\n os.makedirs(\"cache\", exist_ok=True)\n\n\n if \"grc\" not in urn and \"lat\" not in urn:\n type_text = \"translation\"\n else:\n type_text = \"edition\"\n\n \"\"\"\n Change TEI.2 tag to TEI \n \"\"\"\n # We change the main tag\n TEI = parsed.getroot()\n # We change the root tag to TEI\n TEI.tag = \"TEI\"\n # We change the main tag\n TEI = parsed.getroot()\n\n \"\"\"\n Moving every children of //body into a new div with a @n attribute\n \"\"\"\n body = parsed.xpath(\"//body\")[0]\n # Get its children\n child_body = body.getchildren()\n\n # For each child of body, remove it from body\n for child in child_body:\n body.remove(child)\n\n # Create a new div with the informations\n div = etree.Element(\n \"div\",\n attrib = { \n \"type\":type_text,\n \"n\": urn,\n \"{http://www.w3.org/XML/1998/namespace}lang\" : lang\n }\n )\n\n # Add the full list of children of body to the newly created div\n div.extend(child_body)\n # Add this new div in body\n body.append(div)\n\n # Add them to the current encodingDesc\n refsDecl = \"\"\"<tei:refsDecl n=\"CTS\" xmlns:tei=\"http://www.tei-c.org/ns/1.0\">\\n\"\"\" + \"\\n\".join([str(citation) for citation in citations]) + \"\"\"\\n</tei:refsDecl>\"\"\"\n # Parse it\n refsDecl = etree.fromstring(refsDecl)\n # Find encodingDesc\n encodingDesc = parsed.xpath(\"//encodingDesc\")[0]\n encodingDesc.append(refsDecl)\n\n \"\"\"\n Search for old //encodingDesc/refsDecl and refsDecl/state and correct them\n \"\"\"\n refsDecls = parsed.xpath(\"//encodingDesc/refsDecl[@doctype]\")\n for refsDecl in refsDecls:\n refsDecl.set(\"n\", refsDecl.get(\"doctype\"))\n del refsDecl.attrib[\"doctype\"]\n\n states = parsed.xpath(\"//encodingDesc/refsDecl/state\")\n for state in states:\n state.tag = \"refState\"\n\n \"\"\"\n Change language@id to ident\n \"\"\"\n languages = parsed.xpath(\"//langUsage/language[@id]\") + parsed.xpath(\"//langUsage/lang[@id]\")\n for lang in languages:\n lang.set(\"ident\", lang.attrib[\"id\"])\n del lang.attrib[\"id\"]\n\n \"\"\"\n Change pb@id to pb@n\n \"\"\"\n pbs = parsed.xpath(\"//pb[@id]\")\n for pb in pbs:\n pb.set(\"n\", pb.attrib[\"id\"])\n del pb.attrib[\"id\"]\n\n \"\"\"\n Clean keyboarding/p\n \"\"\"\n ps = parsed.xpath(\"//sourceDesc/p\")\n for p in ps:\n p.getparent().remove(p)\n\n \"\"\"\n Clear attributes of text and body\n \"\"\"\n body_text = parsed.xpath(\"//body\") + parsed.xpath(\"//text\")\n for tag in body_text:\n for key in tag.attrib:\n del tag.attrib[key]\n\n\n \"\"\"\n Clear refsDecl/step\n \"\"\"\n refsdecls_step = parsed.xpath(\"//refsDecl/step/parent::refsDecl\")\n for step_parent in refsdecls_step:\n step_parent.getparent().remove(step_parent)\n\n \"\"\"\n Clear refsDecl/step\n \"\"\"\n refsdecls_step = parsed.xpath(\"//refsDecl/step/parent::refsDecl\")\n for step_parent in refsdecls_step:\n step_parent.getparent().remove(step_parent)\n\n \"\"\"\n Fix anchored\n \"\"\"\n anchoreds = parsed.xpath(\"//*[@anchored='yes']\")\n for anchored in anchoreds:\n anchored.set(\"anchored\", \"true\")\n\n # Convert to xml\n \"\"\" \n Create a new document so we can have tei namespace \n \"\"\"\n # And now some other CTS Magic\n New_Root = etree.Element(\n \"{http://www.tei-c.org/ns/1.0}TEI\",\n nsmap = { None : \"http://www.tei-c.org/ns/1.0\" } # Creating a new element allows us to use a default namespace\n )\n New_Root.text = \"\\n\"\n # Add children of old root to New_Root\n New_Root.extend(TEI.getchildren())\n\n # We create a new document\n New_Doc = 
etree.ElementTree(New_Root)\n # And now some other CTS Magic\n \n New_Doc = P4P5(New_Doc)\n\n # save xml\n os.makedirs(os.path.dirname(target), exist_ok=True)\n with open (target, \"w\") as xmlfile:\n xmlfile.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\"\"\"+etree.tostring(New_Doc, encoding=str))\n\n # And now we write cts informations\n try:\n cts.cts_metadata(urn)\n except Exception as E:\n print(E)",
"def s3_process(self, payload, classifier):\n s3_file_lines = StreamPreParsers.pre_parse_s3(payload.raw_record)\n for line in s3_file_lines:\n data = line.rstrip()\n payload.refresh_record(data)\n self.process_alerts(classifier, payload, data)",
"def fix_html_tables(verbose=True):\n \n roots, dates = get_roots(verbose=verbose)\n \n import os\n from grizli import utils\n import boto3\n \n s3 = boto3.resource('s3')\n \n bucket = s3.Bucket('aws-grivam')\n \n if False:\n for root in roots:\n if '+' not in root:\n continue\n\n print(\"aws s3 cp {0}-fit.html s3://aws-grivam/Pipeline/{0}/Extractions/ --acl public-read\".format(root))\n print(\"aws s3 cp {0}-fit.zq.html s3://aws-grivam/Pipeline/{0}/Extractions/ --acl public-read\".format(root))\n \n for root in roots:\n if '+' not in root:\n continue\n \n for ext in ['fit', 'fit.zq']:\n html_file = './{0}-{1}.html'.format(root, ext)\n Key = 'Pipeline/{0}/Extractions/{0}-{1}.html'.format(root, ext)\n #print(html_file, Key)\n \n s3.meta.client.download_file('aws-grivam', Key, html_file)\n \n lines = open(html_file).readlines()\n \n needs_fix = False\n for line in lines:\n if ('+' in root) & (root in line):\n needs_fix = True\n break\n \n if not needs_fix:\n continue\n \n new_lines = [line.replace(root, root.replace('+','%2B')) for line in lines] \n fp = open(html_file,'w')\n fp.writelines(new_lines)\n fp.close()\n \n print(\"aws s3 cp {0} s3://aws-grivam/Pipeline/{1}/Extractions/ --acl public-read\".format(html_file, root))\n \n #s3.meta.client.upload_file(html_file, 'aws-grivam', Key, ExtraArgs={'ACL':'public-read'})",
"def upload_svg(filename, xml_string):\n s3 = boto3.client('s3')\n response = s3.put_object(\n ACL='public-read',\n Body=xml_string,\n Bucket=BUCKET,\n Key=filename,\n StorageClass='REDUCED_REDUNDANCY',\n )\n\n return 'https://s3.amazonaws.com/%s/%s' % (BUCKET, filename)",
"def keyholemarkup2x(file,output='df'):\n r = re.compile(r'(?<=\\.)km+[lz]?',re.I)\n try:\n extension = r.search(file).group(0) #(re.findall(r'(?<=\\.)[\\w]+',file))[-1]\n \n \n except IOError as e:\n logging.error(\"I/O error {0}\".format(e))\n if (extension.lower()=='kml') is True:\n buffer = file\n elif (extension.lower()=='kmz') is True:\n kmz = ZipFile(file, 'r')\n \n vmatch = np.vectorize(lambda x:bool(r.search(x)))\n A = np.array(kmz.namelist())\n sel = vmatch(A)\n buffer = kmz.open(A[sel][0],'r')\n \n else:\n raise ValueError('Incorrect file format entered. Please provide the '\n 'path to a valid KML or KMZ file.') \n \n \n parser = xml.sax.make_parser()\n handler = PlacemarkHandler()\n parser.setContentHandler(handler)\n parser.parse(buffer)\n \n try:\n kmz.close()\n except:\n pass\n \n df = pd.DataFrame(handler.mapping).T\n names = list(map(lambda x: x.lower(),df.columns))\n if 'description' in names:\n extradata = df.apply(PlacemarkHandler.htmlizer,axis=1)\n df = df.join(extradata)\n \n \n output = output.lower()\n \n if output=='df' or output=='dataframe' or output == None:\n result = df\n \n elif output=='csv':\n out_filename = file[:-3] + \"csv\"\n df.to_csv(out_filename,encoding='utf-8',sep=\"\\t\")\n result = (\"Successfully converted {0} to CSV and output to\"\n \" disk at {1}\".format(file,out_filename))\n \n elif output=='gpd' or output == 'gdf' or output=='geoframe' or output == 'geodataframe':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n result = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n \n \n elif output=='geojson' or output=='json':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. {0}'.format(e))\n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n try:\n import geojson\n except ImportError as e:\n raise ImportError('This operation requires geojson. {0}'.format(e))\n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n out_filename = file[:-3] + \"geojson\"\n gdf.to_file(out_filename,driver='GeoJSON')\n validation = geojson.is_valid(geojson.load(open(out_filename)))['valid']\n if validation == 'yes':\n \n result = (\"Successfully converted {0} to GeoJSON and output to\"\n \" disk at {1}\".format(file,out_filename))\n else:\n raise ValueError('The geojson conversion did not create a '\n 'valid geojson object. Try to clean your '\n 'data or try another file.')\n \n elif output=='shapefile' or output=='shp' or output =='esri shapefile':\n try:\n import shapely\n from shapely.geometry import Polygon,LineString,Point\n except ImportError as e:\n raise ImportError('This operation requires shapely. {0}'.format(e))\n try:\n import fiona\n except ImportError as e:\n raise ImportError('This operation requires fiona. 
{0}'.format(e))\n \n try:\n import geopandas as gpd\n except ImportError as e:\n raise ImportError('This operation requires geopandas. {0}'.format(e))\n \n try:\n import shapefile\n except ImportError as e:\n raise ImportError('This operation requires pyshp. {0}'.format(e))\n \n \n geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))\n gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))\n out_filename = file[:-3] + \"shp\"\n gdf.to_file(out_filename,driver='ESRI Shapefile')\n sf = shapefile.Reader(out_filename)\n import shapefile\n sf = shapefile.Reader(out_filename)\n if len(sf.shapes())>0:\n validation = \"yes\"\n else:\n validation = \"no\"\n if validation == 'yes':\n \n result = (\"Successfully converted {0} to Shapefile and output to\"\n \" disk at {1}\".format(file,out_filename))\n else:\n raise ValueError('The Shapefile conversion did not create a '\n 'valid shapefile object. Try to clean your '\n 'data or try another file.') \n else:\n raise ValueError('The conversion returned no data; check if'\n ' you entered a correct output file type. '\n 'Valid output types are geojson, shapefile,'\n ' csv, geodataframe, and/or pandas dataframe.')\n \n return result",
"def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects",
"def lambda_handler(event, context):\n\n for record in event['Records']:\n\n bucket = record['s3']['bucket']['name']\n key = unquote_plus(record['s3']['object']['key'])\n\n str_value = s3_utils.download_file_as_string(bucket, key)\n data = json.loads(str_value)\n\n normalized_data = {\n 'meta': {\n 'table': 'parcels',\n 'column_names': [\n 'dataset',\n 'as_of',\n 'apn',\n 'objectid',\n 'city',\n 'x_coordinate',\n 'y_coordinate',\n 'area',\n 'length'\n ]\n }\n }\n\n rows = []\n\n dataset = data['meta']['dataset']\n as_of = data['meta']['datetime']\n\n for r in data['results']:\n\n attr = r['attributes']\n\n temp_dict = {\n 'dataset': dataset,\n 'as_of': as_of,\n 'apn': attr.get('APN_SPACE'),\n 'objectid': attr.get('OBJECTID'),\n 'city': attr.get('CITY'),\n 'x_coordinate': attr.get('X'),\n 'y_coordinate': attr.get('Y'),\n 'area': attr.get('Shape.STArea()'),\n 'length': attr.get('Shape.STLength()')\n }\n\n rows.append(temp_dict)\n\n normalized_data['rows'] = rows\n \n bucket = 'gis-data-normalized'\n file_name = 'normalized_' + key\n s3_utils.upload_json_as_file(normalized_data, bucket, file_name)",
"def __call__(self, f):\n tree = f.build_etree(lxml=True)\n return self.xslt(tree)",
"def make_s3_keys(self):\n # Write the data twice:\n for fmt in (VERSION_FMT, LATEST_FMT):\n yield make_s3_keys(self, fmt)",
"def convert(self, tag=\"Data\", delimiter=\",\", noheader=False,\n limit=-1, buffer_size=1000):\n\n\n file_ctr = 0\n item_ctr = 0\n for dirName, subdirList, fileList in os.walk(self.input_directory):\n print('Found directory: %s' % dirName)\n for fname in fileList:\n print('\\t%s' % fname)\n # open the xml file for iteration\n if not fname.endswith(\".xml\"):\n continue\n #pdb.set_trace()\n \n input_file = dirName + \"/\" + fname\n self.context = ETree.iterparse(input_file, events=(\"start\", \"end\"))\n\n # iterate through the xml\n items = [{}]\n\n depth = 0\n min_depth = 0\n row_depth = -1\n n = 0\n for event, elem in self.context:\n if event == \"start\":\n depth += 1\n continue\n else:\n depth -= 1\n if depth < min_depth:\n min_depth = depth\n\n if depth < row_depth and items:\n if noheader:\n noheader = False\n else:\n # new line\n self.output_buffer.append(items)\n items = []\n # flush buffer to disk\n if len(self.output_buffer) > buffer_size:\n self._write_buffer(delimiter)\n\n plain_tag = elem.tag\n last_delim = max(elem.tag.rfind('}'), elem.tag.rfind(':'))\n if 0 < last_delim < len(elem.tag) - 1:\n plain_tag = elem.tag[last_delim + 1:]\n if tag == plain_tag:\n if n == 0:\n min_depth = depth\n elif n == 1:\n row_depth = min_depth\n n += 1\n if 0 < limit < n:\n break\n elem_name = elem.get(\"name\")\n if elem_name in self.output_dict[0].keys():\n if elem_name == 'SamS.ArchivedURL':\n if hash(elem.text) in self.item_titles.keys() and self.item_titles[hash(elem.text)] == elem.text:\n #item is repetative\n self.output_dict[item_ctr]={}\n #item_ctr-=1\n break\n else:\n self.item_titles[hash(elem.text)] = elem.text\n self.output_dict[item_ctr][elem_name]= elem.text and elem.text.encode('utf8') or ''\n\n #if (len(self.output_dict[item_ctr]) > 0 ) :\n if ('SamS.ArchivedURL' in self.output_dict[item_ctr]):\n item_ctr+=1\n self.output_dict.append({})\n else:\n self.output_dict[item_ctr] = {}\n \n file_ctr+=1 #next row in the dictionary array\n print \"processing file no \", file_ctr, \" item no\", item_ctr\n\n #pdb.set_trace()\n self._write_buffer(delimiter) # write rest of the buffer to file\n\n return n",
"def content(tmp_loc, ref_names_dict, order):\n \n fl = '[Content_Types].xml'\n inp_path = '/'.join([tmp_loc, fl])\n out_path = '/'.join([output_path, fl])\n \n cnt_lst = []\n asset_lst = []\n def_att = []\n d = dict()\n \n root1,tree1 = gen_tree(inp_path)\n root2,tree2 = gen_tree(out_path)\n \n # get all the extensions belongs to \"Default\" tag\n for relation in root2:\n if 'Default' in relation.tag:\n def_att.append(relation.attrib['Extension'])\n else:\n break\n \n for relation in root1:\n if 'Override' in relation.tag:\n attrib = relation.attrib['PartName'][1:]\n try:\n cnt = attrib.split('ppt/')[-1]\n ini = '/ppt/'\n except:\n cnt = attrib\n ini = '/'\n if cnt in ref_names_dict.keys():\n relation.attrib['PartName'] = f'{ini}{ref_names_dict[cnt]}'\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['PartName'])\n else:\n cnt_lst.append(relation)\n if relation.attrib['PartName'] not in asset_lst:\n asset_lst.append(relation.attrib['PartName'])\n else:\n attrib = relation.attrib['Extension']\n if attrib not in def_att:\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['Extension'])\n # deal with the assest_lst\n # print(\"AA: \", asset_lst)\n cnt_lst = natsort.natsorted(cnt_lst)\n for ele in cnt_lst:\n prev = tree2.find(ele.tag)\n prev.addnext(ele)\n \n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n unq_attr = []\n for relation in root2:\n if 'Override' in relation.tag:\n if relation.attrib['PartName'] not in unq_attr:\n unq_attr.append(relation.attrib['PartName'])\n else:\n root2.remove(relation)\n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)",
"def get_single_xml_metadata(_oid):\n record = Metadata.objects.get_or_404(pk=_oid)\n\n json_rec = json.loads(record.to_json())\n\n d_fmt = '%Y-%m-%d'\n\n d_fmt1 = '%Y-%m-%dT%H:%M:%SZ'\n\n try:\n #start/end date might not exist yet\n if record.start_date is not None:\n json_rec['start_date'] = record.start_date.isoformat() + '.000Z'\n if record.end_date is not None:\n json_rec['end_date'] = record.end_date.isoformat() + '.000Z'\n if record.first_pub_date is not None:\n json_rec['first_pub_date'] = record.first_pub_date.strftime(d_fmt)\n if record.md_pub_date is not None:\n json_rec['md_pub_date'] = record.md_pub_date.strftime(d_fmt1)\n\n except AttributeError:\n # if we get an attribute error, continue; any other error will still\n # cause the program to fail\n pass\n\n json_rec['last_mod_date'] = record.last_mod_date.strftime(d_fmt1)\n\n\n # for XSLT, need something inside of each <item> in this generic XML\n _enclose_word = lambda k: {'word': k}\n _enclose_words = lambda words: map(_enclose_word, words)\n\n json_rec['thematic_keywords'] = _enclose_words(\n json_rec['thematic_keywords'])\n\n json_rec['place_keywords'] = _enclose_words(json_rec['place_keywords'])\n\n json_rec['data_format'] = _enclose_words(json_rec['data_format'])\n\n json_rec['topic_category'] = _enclose_words(json_rec['topic_category'])\n\n _enclose_url = lambda url: {'url': url}\n\n json_rec['online'] = map(_enclose_url, json_rec['online'])\n\n if record.md_pub_date is not None:\n json_rec['download_url'] = \\\n app.config['ATTACHMENT_DOWNLOAD_BASE_URL'] + str(record.id)\n\n xml_str = dicttoxml(dict(record=json_rec)) # , attr_type=False)\n\n return Response(xml_str, 200, mimetype='application/xml')",
"def _copy_gpx_tags(el, out):\n if not el.tag.startswith(\"{http://www.topografix.com/GPX/1/1}\"):\n return\n\n out_el = ET.Element(el.tag, attrib=el.attrib)\n if el.text:\n t = el.text.strip()\n if t:\n out_el.text = t\n\n for c in el:\n _copy_gpx_tags(c, out_el)\n\n if el.tail:\n t = el.tail.strip()\n if t:\n out_el.tail = t\n\n out.append(out_el)",
"def site2nrml(model, params_dict): \n \"\"\"\n # Some XML definitions\n NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'\n GML_NAMESPACE = 'http://www.opengis.net/gml'\n SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE} \n gml_ns = SERIALIZE_NS_MAP['gml']\n \"\"\"\n \n # Head matter \n root = etree.Element(_tag='nrml', nsmap={'gml': 'http://www.opengis.net/gml'})\n root.set('xmlns', 'http://openquake.org/xmlns/nrml/0.4')\n root.append(etree.Comment('%s' % '%s site model' %(model)))\n \n\n # Define Site Model Name \n sMod = etree.SubElement(root, \"siteModel\")\n sMod.set('name', model + ' Site Model')\n \n # Define sub element\n \n for key in params_dict:\n \n site = etree.SubElement(sMod, \"site\")\n site.set('lon', '%s' % key[0])\n site.set('lat', '%s' % key[1])\n site.set('vs30', '%s' % params_dict[key][0])\n site.set('vs30Type', '%s' % 'inferred')\n site.set('z1pt0', '%s' % '%3.3f' % float(params_dict[key][1]))\n site.set('z2pt5', '%s' % '%3.3f' % float(params_dict[key][2]))\n \n #print(getMinMax(params_dict))\n \n # Form tree and write to xml\n root_tree = etree.ElementTree(root)\n outFile = open((out_directory + '/' + out_filename), 'wb')\n root_tree.write(outFile, encoding=\"utf-8\", xml_declaration=True, pretty_print=True)",
"def _build_tree(self, markup):\n clean_markup = tidy_document(markup,\n options={'numeric-entities':1,\n 'output-xml':1,\n 'output-encoding':'utf8'})[0]\n # Small fix for a cornercase involving invalid characters...\n clean_markup = clean_markup.replace('\\x15', '_')\n etree = self._fix_treetags(ET.fromstring(clean_markup))\n return etree",
"def serialize(self, root):",
"def sync_to_bucket(s3_url,\n region='eu-west-1',\n profile_name=None):\n\n parsed_s3_url = urlparse.urlparse(s3_url);\n\n bucket_name = parsed_s3_url.hostname;\n key_prefix = parsed_s3_url.path;\n if key_prefix[0] == '/':\n key_prefix = key_prefix[1:]\n if key_prefix[-1] != '/':\n key_prefix = key_prefix + '/'\n\n def inner(fn_inner):\n \"\"\"\n Decorator function function sent in should be having signature\n func(None,None, XmlDoc) and should yield JSON document one for\n each file that should be persisted to S3\n \"\"\"\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler\n\n return inner",
"def test_rs3filewriter_nested():\n input_tree = t('elaboration', [\n ('N', ['eins']),\n ('S', [\n ('joint', [\n ('N', ['zwei']),\n ('N', ['drei'])])])])\n expected_output_tree = example2tree('eins-zwei-drei-(elab-eins-from-(joint-zwei-and-drei).rs3')\n\n tempfile = NamedTemporaryFile()\n RS3FileWriter(input_tree, output_filepath=tempfile.name)\n produced_output_tree = RSTTree(tempfile.name)\n\n assert produced_output_tree.edu_strings == produced_output_tree.tree.leaves() == ['eins', 'zwei', 'drei']\n assert input_tree == expected_output_tree.tree == produced_output_tree.tree",
"def physical_representation(contents):\n\n input_file = StringIO()\n output_file = StringIO()\n\n contents = contents.replace(\" \", \" \")\n contents = contents.replace(\"<br>\", \"<br/> \")\n contents = contents.encode( 'utf-8' )\n \n parser = make_parser()\n parser.setContentHandler(AnnotationFilter(output_file))\n\n input_file.write(contents)\n input_file.seek(0)\n parser.parse(input_file)\n\n output_file.seek(0)\n output = output_file.read()\n\n document = parseString(output)\n\n span_tags = [span for span in document.getElementsByTagName(\"span\") if span.getAttribute('identifier')]\n\n position = {}\n index = 0\n for span_tag in span_tags:\n identifier = span_tag.getAttribute('identifier')\n positions = position.get(identifier, [])\n positions.append(index)\n position[identifier] = positions\n index = index + 1\n\n\n for identifier, positions in position.iteritems():\n first = span_tags[positions[0]]\n first.setAttribute(\"type\", \"begin\")\n last = span_tags[positions[-2]]\n last.setAttribute(\"type\", \"end\")\n\n for index in positions[1:-2]:\n span_tag = span_tags[index]\n span_tag.parentNode.removeChild(span_tag)\n span_tag.unlink()\n\n return document.toxml()",
"def sc3ml2qml(zipdir, outdir, stylesheet, prog='xalan'):\n\n raw_files = []\n # raw_dir = '/home/chet/data/mrp_data/sherburn_catalog/quake-ml/xsl_test/sc3ml_test'\n for root, dirnames, filenames in os.walk(zipdir):\n for filename in fnmatch.filter(filenames, '*.xml.zip'):\n raw_files.append(os.path.join(root, filename))\n # Running sczip from SC3\n os.chdir('/home/chet/seiscomp3/lib/')\n for afile in raw_files:\n name = afile.rstrip('.zip')\n cmd_str = ' '.join(['/home/chet/seiscomp3/bin/sczip', '-d', afile,\n '-o', name])\n os.system(cmd_str)\n # Convert sc3ml to QuakeML\n # Put new files in separate directory\n new_name = ''.join([outdir, os.path.basename(afile).rstrip('.xml.zip'),\n '_QML.xml'])\n if prog == 'xsltproc':\n cmd_str2 = ' '.join(['xsltproc', '-o', new_name,\n stylesheet, name])\n elif prog == 'xalan':\n cmd_str2 = ' '.join(['xalan', '-xsl', stylesheet, '-in', name,\n '-out', new_name])\n else:\n print('Invalid program type. Use xalan or xsltproc')\n os.system(cmd_str2)\n #Remove all '#' from QuakeML (shady way of circumventing validation issues)\n # qml_files = glob('/home/chet/data/mrp_data/sherburn_catalog/quake-ml/*QML.xml')\n # for one_file in qml_files:\n # command = \"sed -i 's/#//g' \" + one_file\n # os.system(command)\n return",
"def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)",
"def publish(self):\n #vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()"
] | [
"0.6150906",
"0.51852006",
"0.508772",
"0.50787896",
"0.50765",
"0.5073153",
"0.5026106",
"0.50156415",
"0.49646425",
"0.49586073",
"0.4957999",
"0.4951361",
"0.4915427",
"0.49097997",
"0.48692062",
"0.48484707",
"0.4826902",
"0.47564453",
"0.47277948",
"0.47235677",
"0.47011268",
"0.4675392",
"0.46715328",
"0.46665192",
"0.4643192",
"0.46420306",
"0.46390545",
"0.46353626",
"0.46334642",
"0.46310183"
] | 0.6966398 | 0 |
Update custom fieldtype specific settings into the etree | def __update_custom_fieldtype_settings(self,
eachfield, #field etree
):
# xml attributes
TYPE = "type"
READABLE = "readable"
WRITABLE = "writable"
LABEL = "label"
HINT = "comment"
DEFAULT = "default"
LINES = "lines"
BOXES = "boxes"
HASOPTIONS = "has_options"
fieldtype = eachfield.attrib.get(TYPE)
fieldtype_property = self.custom_fieldtype_properties.get(fieldtype, {})
cust_fieldtype = fieldtype_property.get("fieldtype", None)
cust_readable = fieldtype_property.get("readable", None)
cust_writable = fieldtype_property.get("writable", None)
cust_label = fieldtype_property.get("label", None)
cust_hint = fieldtype_property.get("hint", None)
cust_default = fieldtype_property.get("default", None)
cust_lines = fieldtype_property.get("lines", None)
cust_boxes = fieldtype_property.get("boxes", None)
cust_has_options = fieldtype_property.get("has_options", None)
cust_options = fieldtype_property.get("options", None)
if cust_fieldtype:
if cust_fieldtype != None:
eachfield.set(TYPE, cust_fieldtype)
if cust_readable != None:
eachfield.set(READABLE, cust_readable)
if cust_writable != None:
eachfield.set(WRITABLE, cust_writable)
if cust_label != None:
eachfield.set(LABEL, cust_label)
if cust_hint != None:
eachfield.set(HINT, cust_hint)
if cust_default != None:
eachfield.set(DEFAULT, cust_default)
if cust_lines != None:
eachfield.set(LINES, cust_lines)
if cust_boxes != None:
eachfield.set(BOXES, cust_boxes)
if cust_has_options != None:
eachfield.set(HASOPTIONS, cust_has_options)
if cust_options != None:
opt_available = eachfield.getchildren()
if len(opt_available) == 0:
eachfield.append(cust_options)
elif len(opt_available) == 1:
eachfield.remove(opt_available[0])
eachfield.append(cust_options) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __update_custom_field_settings(self,\n eachfield, #field etree\n resourcetablename,\n fieldname\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n unikey = \"%s__%s\" % (resourcetablename, fieldname)\n field_property = self.custom_field_properties.get(unikey, {})\n\n cust_fieldtype = field_property.get(\"fieldtype\", None)\n cust_readable = field_property.get(\"readable\", None)\n cust_writable = field_property.get(\"writable\", None)\n cust_label = field_property.get(\"label\", None)\n cust_hint = field_property.get(\"hint\", None)\n cust_default = field_property.get(\"default\", None)\n cust_lines = field_property.get(\"lines\", None)\n cust_boxes = field_property.get(\"boxes\", None)\n cust_has_options = field_property.get(\"has_options\", None)\n cust_options = field_property.get(\"options\", None)\n\n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)",
"def mask_custom_field(self, custom_field, doc_type):\n\t\tcustom_field.fields.update({\n\t\t\t'doctype': 'DocField',\n\t\t\t'parent': doc_type,\n\t\t\t'parentfield': 'fields',\n\t\t\t'parenttype': 'DocType',\n\t\t})",
"def upgrade_markup_controlpanel_settings(context):\n # get the old site properties\n portal_properties = getToolByName(context, \"portal_properties\")\n site_properties = portal_properties.site_properties\n # get the new registry\n registry = getUtility(IRegistry)\n # XXX: Somehow this code is executed for old migration steps as well\n # ( < Plone 4 ) and breaks because there is no registry. Looking up the\n # registry interfaces with 'check=False' will not work, because it will\n # return a settings object and then fail when we try to access the\n # attributes.\n try:\n settings = registry.forInterface(\n IMarkupSchema,\n prefix='plone',\n )\n except KeyError:\n settings = False\n if settings:\n settings.default_type = site_properties.default_contenttype\n\n forbidden_types = site_properties.getProperty('forbidden_contenttypes')\n forbidden_types = list(forbidden_types) if forbidden_types else []\n\n portal_transforms = getToolByName(context, 'portal_transforms')\n allowable_types = portal_transforms.listAvailableTextInputs()\n\n settings.allowed_types = tuple([\n _type for _type in allowable_types\n if _type not in forbidden_types\n and _type not in 'text/x-plone-outputfilters-html' # removed, as in plone.app.vocabularies.types # noqa\n ])",
"def update_set(self):\n for field in self.children:\n if issubclass(field.__class__, MyTextField):\n val = field.get_field().value\n setattr(self.set, field.get_field().name, val if val != \"\" else None)",
"def handle_field(self, obj, field):\n self.indent(3)\n internal_type = field.get_internal_type()\n attrs = {\n \"id\": field.name,\n \"resname\": field.name,\n \"restype\": \"x-%s\" % internal_type,\n \"translate\": \"no\",\n }\n if internal_type in (\"CharField\", \"TextField\"):\n attrs[\"translate\"] = \"yes\"\n\n if internal_type == \"CharField\":\n attrs[\"size-unit\"] = \"char\"\n attrs[\"maxwidth\"] = str(field.max_length)\n\n self.xml.startElement(\"trans-unit\", attrs)\n self.indent(4)\n self.xml.startElement(\"source\", {})\n # Get a \"string version\" of the object's data.\n if getattr(obj, field.name) is not None:\n self.xml.characters(field.value_to_string(obj))\n else:\n self.xml.addQuickElement(\"None\")\n\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")",
"def test_setter_builtin_types(self):\n root = netapp_api.NaElement('root')\n root['e1'] = 'v1'\n root['e2'] = 1\n root['e3'] = 2.0\n root['e4'] = 8l\n self.assertEqual(len(root.get_children()), 4)\n self.assertEqual(root.get_child_content('e1'), 'v1')\n self.assertEqual(root.get_child_content('e2'), '1')\n self.assertEqual(root.get_child_content('e3'), '2.0')\n self.assertEqual(root.get_child_content('e4'), '8')",
"def test_fieldValueTypes(self):\n # tests for \"method\" and \"datetime\" values follow later on ...\n # booleans are not tested yet\n\n factory = self.root.manage_addProduct['Formulator']\n factory.manage_add('form', 'ValueTest')\n factory.manage_add('form2', 'ValueTest')\n form = self.root.form\n form.manage_addField('int_field', 'Test Integer Field', 'IntegerField')\n form.manage_addField('float_field', 'Test Float Field', 'FloatField')\n form.manage_addField('date_field', 'Test Date Field', 'DateTimeField')\n form.manage_addField('list_field', 'Test List Field', 'ListField')\n form.manage_addField(\n 'multi_field',\n 'Test Checkbox Field',\n 'MultiCheckBoxField')\n form.manage_addField('link_field', 'Test Link Field', 'LinkField')\n form.manage_addField('empty_field', 'Test Empty Field', 'StringField')\n int_field = form.int_field\n float_field = form.float_field\n date_field = form.date_field\n list_field = form.list_field\n multi_field = form.multi_field\n link_field = form.link_field\n empty_field = form.empty_field\n\n # XXX editing fields by messing with a fake request\n # -- any better way to do this?\n # (could assign to \"values\" directly ...)\n\n default_values = {'field_title': 'Test Title',\n 'field_display_width': '92',\n 'field_required': 'checked',\n 'field_enabled': 'checked',\n }\n try:\n form_values = default_values.copy()\n form_values.update({'field_default': 'None',\n 'field_required': '',\n })\n empty_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '42',\n 'field_enabled': 'checked'})\n int_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '1.7'})\n float_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n # XXX cannot test \"defaults to now\", as this may fail randomly\n form_values = default_values.copy()\n form_values.update({'field_input_style': 'list',\n 'field_input_order': 'mdy',\n 'field_date_only': '',\n 'field_css_class': 'test_css',\n 'field_time_separator': '$'})\n date_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'foo',\n 'field_size': '1',\n 'field_items': 'Foo | foo\\n Bar | bar'})\n list_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update(\n {'field_default': 'foo',\n 'field_size': '3',\n 'field_items': 'Foo | foo\\n Bar | bar\\nBaz | baz',\n 'field_orientation': 'horizontal',\n 'field_view_separator': '<br />\\n'})\n multi_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'http://www.absurd.org',\n 'field_required': '1',\n 'field_check_timeout': '5.0',\n 'field_link_type': 'external',\n })\n link_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n except ValidationError as e:\n self.fail('error when editing field %s; error message: %s' %\n (e.field_id, e.error_text))\n\n form2 = self.root.form2\n\n xml = formToXML(form)\n XMLToForm(xml, form2)\n\n self.assertEqualForms(form, form2)\n\n request = TestRequest()\n request.form['field_int_field'] = '42'\n request.form['field_float_field'] = '2.71828'\n request.form['subfield_date_field_month'] = '11'\n request.form['subfield_date_field_day'] = '11'\n # This field only allows ten years in the future, today 2023-03-14\n request.form['subfield_date_field_year'] = '2033'\n 
request.form['subfield_date_field_hour'] = '09'\n request.form['subfield_date_field_minute'] = '59'\n request.form['field_list_field'] = 'bar'\n request.form['field_multi_field'] = ['bar', 'baz']\n request.form['field_link_field'] = 'http://www.zope.org'\n try:\n result1 = form.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n\n try:\n result2 = form2.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n self.assertEqual(result1, result2)\n self.assertEqual(42, result2['int_field'])\n self.assertEqual(2.71828, result2['float_field'])\n\n # check link field timeout value\n self.assertEqual(link_field.get_value('check_timeout'),\n form2.link_field.get_value('check_timeout'))\n\n # XXX not tested: equal form validation failure on invalid input",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def test_setter_builtin_types(self):\n root = netapp_api.NaElement('root')\n root['e1'] = 'v1'\n root['e2'] = 1\n root['e3'] = 2.0\n root['e4'] = 8\n self.assertEqual(4, len(root.get_children()))\n self.assertEqual('v1', root.get_child_content('e1'))\n self.assertEqual('1', root.get_child_content('e2'))\n self.assertEqual('2.0', root.get_child_content('e3'))\n self.assertEqual('8', root.get_child_content('e4'))",
"def update_settings_data(self):\n debug = False\n grid_data = self.get_grid_data() ## only saved data. eol-safe inc\n if debug: \n print(f'grid data: {grid_data}')\n print('Original settings data:')\n pprint.pprint(self.settings_data)\n for i, row in enumerate(grid_data):\n if debug: print(row)\n self.settings_data[i][mg.TBL_FLDNAME] = row[0]\n self.settings_data[i][mg.TBL_FLDTYPE] = row[1]\n if self.debug or debug:\n print('Final settings data:')\n pprint.pprint(self.settings_data)",
"def updateFields(self):\n super(AdminRulesForm, self).updateFields()\n self.fields['improved_templates'].widgetFactory = CheckBoxFieldWidget\n self.fields['iframe_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['js_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['inline_images'].widgetFactory = SingleCheckBoxFieldWidget",
"def _set_attributes(self):",
"def set_generic_fields(self):\n self.constant_fields[\"admver\"] = 9.1\n self.constant_fields[\"datatype\"] = 'raw'\n self.constant_fields[\"dfo\"] = '//'\n self.constant_fields[\"enterdate\"] = time.strftime(\"%m/%d/%Y\")",
"def patch_docfields(app):\n\n transform_node = partial(_transform_node, app)\n\n def get_data_structure(entries, types, field_object):\n \"\"\"\n Get a proper docfx YAML data structure from the entries & types\n \"\"\"\n\n data = {\n 'parameters': [],\n 'variables': [],\n 'exceptions': [],\n 'return': {},\n 'references': [],\n }\n\n def make_param(_id, _description, _type=None, _required=None):\n ret = {\n 'id': _id,\n 'description': _description.strip(\" \\n\\r\\t\")\n }\n if _type:\n ret['type'] = _type\n\n if _required is not None:\n ret['isRequired'] = _required\n\n return ret\n\n def transform_para(para_field):\n if isinstance(para_field, addnodes.pending_xref):\n return transform_node(para_field)\n else:\n return para_field.astext()\n\n def resolve_type(data_type):\n # Remove @ ~ and \\n for cross reference in parameter/return value type to apply to docfx correctly\n data_type = re.sub('[@~\\n]', '', data_type)\n\n # Add references for docfx to resolve ref if type contains TYPE_SEP_PATTERN\n _spec_list = []\n _spec_fullnames = re.split(TYPE_SEP_PATTERN, data_type)\n\n _added_reference = {}\n if len(_spec_fullnames) > 1:\n _added_reference_name = ''\n for _spec_fullname in _spec_fullnames:\n if _spec_fullname != '':\n _spec = {}\n _spec['name'] = _spec_fullname.split('.')[-1]\n _spec['fullName'] = _spec_fullname\n if re.match(TYPE_SEP_PATTERN, _spec_fullname) is None:\n _spec['uid'] = _spec_fullname\n _spec_list.append(_spec)\n _added_reference_name += _spec['name']\n\n _added_reference = {\n 'uid': data_type,\n 'name': _added_reference_name,\n 'fullName': data_type,\n 'spec.python': _spec_list\n }\n\n return data_type, _added_reference\n\n def extract_exception_desc(field_object):\n ret = []\n if len(field_object) > 0:\n for field in field_object:\n if 'field_name' == field[0].tagname and field[0].astext() == 'Raises':\n assert field[1].tagname == 'field_body'\n field_body = field[1]\n\n children = [n for n in field_body\n if not isinstance(n, nodes.Invisible)]\n\n for child in children:\n if isinstance (child, nodes.paragraph):\n pending_xref_index = child.first_child_matching_class(addnodes.pending_xref)\n if pending_xref_index is not None:\n pending_xref = child[pending_xref_index]\n raise_type_index = pending_xref.first_child_matching_class(nodes.literal)\n if raise_type_index is not None:\n raise_type = pending_xref[raise_type_index]\n ret.append({'type': pending_xref['reftarget'], 'desc': raise_type.astext()})\n\n return ret\n\n for entry in entries:\n if isinstance(entry, nodes.field):\n # pass-through old field\n pass\n else:\n fieldtype, content = entry\n fieldtypes = types.get(fieldtype.name, {})\n if fieldtype.name == 'exceptions':\n for _type, _description in content:\n data['exceptions'].append({\n 'type': _type,\n 'description': transform_node(_description[0]).strip(\" \\n\\r\\t\")\n })\n if fieldtype.name == 'returntype':\n for returntype_node in content[1]:\n returntype_ret = transform_node(returntype_node)\n if returntype_ret:\n # Support or in returntype\n for returntype in re.split('[ \\n]or[ \\n]', returntype_ret):\n returntype, _added_reference = resolve_type(returntype)\n if _added_reference:\n if len(data['references']) == 0:\n data['references'].append(_added_reference)\n elif any(r['uid'] != _added_reference['uid'] for r in data['references']):\n data['references'].append(_added_reference)\n\n data['return'].setdefault('type', []).append(returntype)\n if fieldtype.name == 'returnvalue':\n returnvalue_ret = transform_node(content[1][0])\n if 
returnvalue_ret:\n data['return']['description'] = returnvalue_ret.strip(\" \\n\\r\\t\")\n if fieldtype.name in ['parameter', 'variable', 'keyword']:\n for field, node_list in content:\n _id = field\n _description = transform_node(node_list[0])\n if field in fieldtypes:\n _type = u''.join(transform_para(n) for n in fieldtypes[field])\n else:\n _type = None\n\n _para_types = []\n if fieldtype.name == 'parameter' or fieldtype.name == 'keyword':\n if _type:\n # Support or in parameter type\n for _s_type in re.split('[ \\n]or[ \\n]', _type):\n _s_type, _added_reference = resolve_type(_s_type)\n if _added_reference:\n if len(data['references']) == 0:\n data['references'].append(_added_reference)\n elif any(r['uid'] != _added_reference['uid'] for r in data['references']):\n data['references'].append(_added_reference)\n\n _para_types.append(_s_type)\n\n\n\n _data = make_param(_id=_id, _type=_para_types, _description=_description, _required=False if fieldtype.name == 'keyword' else True)\n data['parameters'].append(_data)\n\n if fieldtype.name == 'variable':\n if _type:\n # Support or in variable type\n for _s_type in re.split('[ \\n]or[ \\n]', _type):\n _s_type, _added_reference = resolve_type(_s_type)\n if _added_reference:\n if len(data['references']) == 0:\n data['references'].append(_added_reference)\n elif any(r['uid'] != _added_reference['uid'] for r in data['references']):\n data['references'].append(_added_reference)\n\n _para_types.append(_s_type)\n\n _data = make_param(_id=_id, _type=_para_types, _description=_description)\n data['variables'].append(_data)\n\n ret_list = extract_exception_desc(field_object)\n for ret in ret_list:\n # only use type in exceptions\n data.setdefault('exceptions', []).append({\n 'type': ret['type']\n })\n\n return data\n\n\n class PatchedDocFieldTransformer(docfields.DocFieldTransformer):\n\n @staticmethod\n def type_mapping(type_name):\n mapping = {\n \"staticmethod\": \"method\",\n \"classmethod\": \"method\",\n \"exception\": \"class\"\n }\n\n return mapping[type_name] if type_name in mapping else type_name\n\n def __init__(self, directive):\n self.directive = directive\n super(PatchedDocFieldTransformer, self).__init__(directive)\n\n def transform_all(self, node):\n \"\"\"Transform all field list children of a node.\"\"\"\n # don't traverse, only handle field lists that are immediate children\n summary = []\n data = {}\n name, uid = _get_desc_data(node.parent)\n for child in node:\n if isinstance(child, remarks):\n remarks_string = transform_node(child)\n data['remarks'] = remarks_string\n elif isinstance(child, addnodes.desc):\n if child.get('desctype') == 'attribute':\n attribute_map = {} # Used for detecting duplicated attributes in intermediate data and merge them\n\n for item in child:\n if isinstance(item, desc_signature) and any(isinstance(n, addnodes.desc_annotation) for n in item):\n # capture attributes data and cache it\n data.setdefault('added_attribute', [])\n\n item_ids = item.get('ids', [''])\n\n if len(item_ids) == 0: # find a node with no 'ids' attribute\n curuid = item.get('module', '') + '.' 
+ item.get('fullname', '')\n # generate its uid by module and fullname\n else:\n curuid = item_ids[0]\n\n if len(curuid) > 0:\n parent = curuid[:curuid.rfind('.')]\n name = item.children[0].astext()\n\n if curuid in attribute_map:\n if len(item_ids) == 0: # ensure the order of docstring attributes and real attributes is fixed\n attribute_map[curuid]['syntax']['content'] += (' ' + item.astext())\n # concat the description of duplicated nodes\n else:\n attribute_map[curuid]['syntax']['content'] = item.astext() + ' ' + attribute_map[curuid]['syntax']['content']\n else:\n if _is_desc_of_enum_class(node):\n addedData = {\n 'uid': curuid,\n 'id': name,\n 'parent': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': item.parent.get('desctype'),\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext(),\n 'return': {\n 'type': [parent]\n }\n }\n }\n else:\n addedData = {\n 'uid': curuid,\n 'class': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': 'attribute',\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext()\n }\n }\n\n attribute_map[curuid] = addedData\n else:\n raise Exception('ids of node: ' + repr(item) + ' is missing.')\n # no ids and no duplicate or uid can not be generated.\n if 'added_attribute' in data:\n data['added_attribute'].extend(attribute_map.values()) # Add attributes data to a temp list\n\n # Don't recurse into child nodes\n continue\n elif isinstance(child, nodes.field_list):\n (entries, types) = _hacked_transform(self.typemap, child)\n _data = get_data_structure(entries, types, child)\n data.update(_data)\n elif isinstance(child, addnodes.seealso):\n data['seealso'] = transform_node(child)\n elif isinstance(child, nodes.admonition) and 'Example' in child[0].astext():\n # Remove the admonition node\n child_copy = child.deepcopy()\n child_copy.pop(0)\n data['example'] = transform_node(child_copy)\n else:\n content = transform_node(child)\n\n # skip 'Bases' in summary\n if not content.startswith('Bases: '):\n summary.append(content)\n\n if \"desctype\" in node.parent and node.parent[\"desctype\"] == 'class':\n data.pop('exceptions', '') # Make sure class doesn't have 'exceptions' field.\n\n if summary:\n data['summary'] = '\\n'.join(summary)\n # Don't include empty data\n for key, val in data.copy().items():\n if not val:\n del data[key]\n data['type'] = PatchedDocFieldTransformer.type_mapping(node.parent[\"desctype\"]) if \"desctype\" in node.parent else 'unknown'\n self.directive.env.docfx_info_field_data[uid] = data\n super(PatchedDocFieldTransformer, self).transform_all(node)\n\n directives.DocFieldTransformer = PatchedDocFieldTransformer",
"def fallback(self, node_type: Any) -> None:\n for node in self.document.findall(node_type):\n newnode = nodes.inline()\n newnode.update_all_atts(node)\n newnode.extend(node)\n # Only set _sig_node_type if not defined by the user\n newnode.setdefault('_sig_node_type', node.tagname)\n node.replace_self(newnode)",
"def get_custom_fields_for_model(content_type, filterable_only=False, bulk_edit=False):\n field_dict = OrderedDict()\n kwargs = {'obj_type': content_type}\n if filterable_only:\n kwargs['is_filterable'] = True\n custom_fields = CustomField.objects.filter(**kwargs)\n\n for cf in custom_fields:\n field_name = 'cf_{}'.format(str(cf.name))\n\n # Integer\n if cf.type == CF_TYPE_INTEGER:\n field = forms.IntegerField(required=cf.required, initial=cf.default)\n\n # Boolean\n elif cf.type == CF_TYPE_BOOLEAN:\n choices = (\n (None, '---------'),\n (1, 'True'),\n (0, 'False'),\n )\n if cf.default.lower() in ['true', 'yes', '1']:\n initial = 1\n elif cf.default.lower() in ['false', 'no', '0']:\n initial = 0\n else:\n initial = None\n field = forms.NullBooleanField(required=cf.required, initial=initial,\n widget=forms.Select(choices=choices))\n\n # Date\n elif cf.type == CF_TYPE_DATE:\n field = forms.DateField(required=cf.required, initial=cf.default, help_text=\"Date format: YYYY-MM-DD\")\n\n # Select\n elif cf.type == CF_TYPE_SELECT:\n choices = [(cfc.pk, cfc) for cfc in cf.choices.all()]\n if not cf.required or bulk_edit or filterable_only:\n choices = [(None, '---------')] + choices\n field = forms.TypedChoiceField(choices=choices, coerce=int, required=cf.required)\n\n # URL\n elif cf.type == CF_TYPE_URL:\n field = LaxURLField(required=cf.required, initial=cf.default)\n\n # Text\n else:\n field = forms.CharField(max_length=255, required=cf.required, initial=cf.default)\n\n field.model = cf\n field.label = cf.label if cf.label else cf.name.replace('_', ' ').capitalize()\n if cf.description:\n field.help_text = cf.description\n\n field_dict[field_name] = field\n\n return field_dict",
"def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)",
"def set_all(self, field, value):\n fields = self.find_all(field)\n for f in fields:\n f.value = value",
"def _adjust_kwargs(cls, **kwargs):\r\n tag = kwargs.pop('tag', 'unknown')\r\n kwargs['policy'] = {'{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']): kwargs['policy']}\r\n\r\n kwargs['xml_node'].text = kwargs.pop('text', None)\r\n\r\n kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))\r\n\r\n # Make sure that the xml_module doesn't try and open a file to find the contents\r\n # of this node.\r\n inline_xml = kwargs.pop('inline_xml')\r\n\r\n if inline_xml:\r\n kwargs['xml_node'].set('not_a_pointer', 'true')\r\n\r\n for key in kwargs.keys():\r\n if key not in XML_IMPORT_ARGS:\r\n kwargs['xml_node'].set(key, kwargs.pop(key))\r\n\r\n if not inline_xml:\r\n kwargs['xml_node'].write(\r\n kwargs['filesystem'].open(\r\n '{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])\r\n ),\r\n encoding='utf-8'\r\n )\r\n\r\n return kwargs",
"def update(self, identity, data=None, record=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})",
"def _adjust_kwargs(cls, **kwargs):\n tag = kwargs.pop('tag', 'unknown')\n kwargs['policy'] = {'{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']): kwargs['policy']}\n\n kwargs['xml_node'].text = kwargs.pop('text', None)\n\n kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))\n\n # Make sure that the xml_module doesn't try and open a file to find the contents\n # of this node.\n inline_xml = kwargs.pop('inline_xml')\n\n if inline_xml:\n kwargs['xml_node'].set('not_a_pointer', 'true')\n\n for key in list(kwargs.keys()):\n if key not in XML_IMPORT_ARGS:\n kwargs['xml_node'].set(key, kwargs.pop(key))\n\n if not inline_xml:\n kwargs['xml_node'].write(\n kwargs['filesystem'].open(\n '{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])\n ),\n encoding='utf-8'\n )\n\n return kwargs",
"def prepare_node_attrs(self):",
"def _modify(self, fields):\n return fields",
"def update_simple(parent, name, value):\n element = parent.find('./' + name) \n\n if element is None:\n element = ET.SubElement(parent, name)\n element.text = value\n else:\n element.text = value",
"def update(self, feature_col, feature_value, node_type, nodes, children=[]):\n self.feature_col = feature_col\n self.feature_value = feature_value\n self.node_type = node_type\n self.nodes = nodes\n self.children = children",
"def _process_plugin_data(self, fields, fetch_related_data=False):\n for field, default_value in fields:\n try:\n setattr(\n self.data,\n field,\n self.plugin_data.get(field, default_value)\n )\n except Exception:\n setattr(self.data, field, default_value)"
] | [
"0.72086793",
"0.5807909",
"0.53538555",
"0.5347925",
"0.53453594",
"0.52939373",
"0.5225218",
"0.521721",
"0.521721",
"0.521721",
"0.521721",
"0.521721",
"0.517917",
"0.51706946",
"0.51572657",
"0.5152697",
"0.513768",
"0.5122455",
"0.5120483",
"0.51168615",
"0.5108007",
"0.5106897",
"0.5104712",
"0.5102605",
"0.5101905",
"0.50633186",
"0.50421566",
"0.5012305",
"0.5010542",
"0.50054085"
] | 0.78392935 | 0 |
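Note: the documents for the record above and the record below both apply per-field property overrides to an s3xml <field> element by setting etree attributes. A minimal, runnable sketch of that attribute-update pattern follows; the props mapping and the sample field markup are illustrative assumptions, not values taken from the records themselves.

from lxml import etree

# Hypothetical field element and override mapping (assumed for illustration only).
field = etree.fromstring('<field name="comments" type="text"/>')
props = {"type": "string", "label": "Comments", "readable": "true"}

# Apply each non-None override as an XML attribute, as the record documents do.
for attr, value in props.items():
    if value is not None:
        field.set(attr, value)  # etree attribute values must be strings

print(etree.tostring(field).decode())
# <field name="comments" type="string" label="Comments" readable="true"/>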
Update custom field specific settings into the etree | def __update_custom_field_settings(self,
eachfield, #field etree
resourcetablename,
fieldname
):
# xml attributes
TYPE = "type"
READABLE = "readable"
WRITABLE = "writable"
LABEL = "label"
HINT = "comment"
DEFAULT = "default"
LINES = "lines"
BOXES = "boxes"
HASOPTIONS = "has_options"
unikey = "%s__%s" % (resourcetablename, fieldname)
field_property = self.custom_field_properties.get(unikey, {})
cust_fieldtype = field_property.get("fieldtype", None)
cust_readable = field_property.get("readable", None)
cust_writable = field_property.get("writable", None)
cust_label = field_property.get("label", None)
cust_hint = field_property.get("hint", None)
cust_default = field_property.get("default", None)
cust_lines = field_property.get("lines", None)
cust_boxes = field_property.get("boxes", None)
cust_has_options = field_property.get("has_options", None)
cust_options = field_property.get("options", None)
if cust_fieldtype:
if cust_fieldtype != None:
eachfield.set(TYPE, cust_fieldtype)
if cust_readable != None:
eachfield.set(READABLE, cust_readable)
if cust_writable != None:
eachfield.set(WRITABLE, cust_writable)
if cust_label != None:
eachfield.set(LABEL, cust_label)
if cust_hint != None:
eachfield.set(HINT, cust_hint)
if cust_default != None:
eachfield.set(DEFAULT, cust_default)
if cust_lines != None:
eachfield.set(LINES, cust_lines)
if cust_boxes != None:
eachfield.set(BOXES, cust_boxes)
if cust_has_options != None:
eachfield.set(HASOPTIONS, cust_has_options)
if cust_options != None:
opt_available = eachfield.getchildren()
if len(opt_available) == 0:
eachfield.append(cust_options)
elif len(opt_available) == 1:
eachfield.remove(opt_available[0])
eachfield.append(cust_options) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)",
"def update_simple(parent, name, value):\n element = parent.find('./' + name) \n\n if element is None:\n element = ET.SubElement(parent, name)\n element.text = value\n else:\n element.text = value",
"def update_set(self):\n for field in self.children:\n if issubclass(field.__class__, MyTextField):\n val = field.get_field().value\n setattr(self.set, field.get_field().name, val if val != \"\" else None)",
"def set_all(self, field, value):\n fields = self.find_all(field)\n for f in fields:\n f.value = value",
"def mask_custom_field(self, custom_field, doc_type):\n\t\tcustom_field.fields.update({\n\t\t\t'doctype': 'DocField',\n\t\t\t'parent': doc_type,\n\t\t\t'parentfield': 'fields',\n\t\t\t'parenttype': 'DocType',\n\t\t})",
"def handle_field(self, obj, field):\n self.indent(3)\n internal_type = field.get_internal_type()\n attrs = {\n \"id\": field.name,\n \"resname\": field.name,\n \"restype\": \"x-%s\" % internal_type,\n \"translate\": \"no\",\n }\n if internal_type in (\"CharField\", \"TextField\"):\n attrs[\"translate\"] = \"yes\"\n\n if internal_type == \"CharField\":\n attrs[\"size-unit\"] = \"char\"\n attrs[\"maxwidth\"] = str(field.max_length)\n\n self.xml.startElement(\"trans-unit\", attrs)\n self.indent(4)\n self.xml.startElement(\"source\", {})\n # Get a \"string version\" of the object's data.\n if getattr(obj, field.name) is not None:\n self.xml.characters(field.value_to_string(obj))\n else:\n self.xml.addQuickElement(\"None\")\n\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")",
"def __set__(self, page, value):\n element = self.get(page)\n element.value = value",
"def _set_attributes(self):",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def custom_fields(self, custom_fields):\n\n self._custom_fields = custom_fields",
"def prepare_node_attrs(self):",
"def _adjust_kwargs(cls, **kwargs):\n tag = kwargs.pop('tag', 'unknown')\n kwargs['policy'] = {'{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']): kwargs['policy']}\n\n kwargs['xml_node'].text = kwargs.pop('text', None)\n\n kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))\n\n # Make sure that the xml_module doesn't try and open a file to find the contents\n # of this node.\n inline_xml = kwargs.pop('inline_xml')\n\n if inline_xml:\n kwargs['xml_node'].set('not_a_pointer', 'true')\n\n for key in list(kwargs.keys()):\n if key not in XML_IMPORT_ARGS:\n kwargs['xml_node'].set(key, kwargs.pop(key))\n\n if not inline_xml:\n kwargs['xml_node'].write(\n kwargs['filesystem'].open(\n '{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])\n ),\n encoding='utf-8'\n )\n\n return kwargs",
"def _adjust_kwargs(cls, **kwargs):\r\n tag = kwargs.pop('tag', 'unknown')\r\n kwargs['policy'] = {'{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']): kwargs['policy']}\r\n\r\n kwargs['xml_node'].text = kwargs.pop('text', None)\r\n\r\n kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))\r\n\r\n # Make sure that the xml_module doesn't try and open a file to find the contents\r\n # of this node.\r\n inline_xml = kwargs.pop('inline_xml')\r\n\r\n if inline_xml:\r\n kwargs['xml_node'].set('not_a_pointer', 'true')\r\n\r\n for key in kwargs.keys():\r\n if key not in XML_IMPORT_ARGS:\r\n kwargs['xml_node'].set(key, kwargs.pop(key))\r\n\r\n if not inline_xml:\r\n kwargs['xml_node'].write(\r\n kwargs['filesystem'].open(\r\n '{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])\r\n ),\r\n encoding='utf-8'\r\n )\r\n\r\n return kwargs",
"def remember_custom_attribute(self, node_name, aid, value):\n if node_name in self.file.custom_attributes:\n self.file.custom_attributes[node_name][aid]=value\n else:\n self.file.custom_attributes[node_name] = { aid: value}",
"def _set_field(self, instrument_name, parameter_name, field, value, force_update):\n if self.verbose >= 2:\n print('_set_field: %s %s: %s' % (instrument_name, parameter_name, str(value)))\n tree_widget = self._itemsdict[instrument_name][parameter_name]['widget']\n double_box = self._itemsdict[instrument_name][parameter_name]['double_box']\n\n field_index = self._fields.index(field)\n\n double_value = False\n if field_index == 0 and double_box is not None:\n double_value = True\n if not double_value:\n tree_widget.setText(field_index + 1, str(value))\n else:\n # update a float value\n try:\n update_value = np.abs(tree_widget.value() - value) > 1e-9\n except Exception as ex:\n logging.debug(ex)\n update_value = True\n if update_value or force_update:\n if not double_box.hasFocus(): # do not update when editing\n logging.debug('update %s to %s' % (parameter_name, value))\n try:\n oldstate = double_box.blockSignals(True)\n double_box.setValue(value)\n double_box.blockSignals(oldstate)\n except Exception as ex:\n logging.debug(ex)",
"def set_adjustment_values(self,builder,etree):\n for object in etree.xpath('/interface/object[@class=\"GtkAdjustment\"]'):\n property = object.xpath('property[@name=\"value\"]')\n if len(property):\n obj = builder.get_object(object.get('id'))\n obj.set_value(float(property[0].text))",
"def _setValue(self, field, value):\n self._contents[field] = value",
"def update(self, identity, data=None, record=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})",
"def _modify(self, fields):\n return fields",
"def update_settings_data(self):\n debug = False\n grid_data = self.get_grid_data() ## only saved data. eol-safe inc\n if debug: \n print(f'grid data: {grid_data}')\n print('Original settings data:')\n pprint.pprint(self.settings_data)\n for i, row in enumerate(grid_data):\n if debug: print(row)\n self.settings_data[i][mg.TBL_FLDNAME] = row[0]\n self.settings_data[i][mg.TBL_FLDTYPE] = row[1]\n if self.debug or debug:\n print('Final settings data:')\n pprint.pprint(self.settings_data)",
"def update_fields(self):\n if hasattr(self.day, \"body_composition\"):\n for f in self.get_fields():\n name = f.get_field().name\n value = getattr(self.day.body_composition, name, None)\n if value is not None:\n f.set_field(value)\n else:\n f.set_field(\"\")",
"def update(self, feature_col, feature_value, node_type, nodes, children=[]):\n self.feature_col = feature_col\n self.feature_value = feature_value\n self.node_type = node_type\n self.nodes = nodes\n self.children = children",
"def presavemodel_serializationhelpers_updatefields(self):\n # get a collection IF it exists\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 1 for object {0}\".format(str(self))\n sdictcollection = self.getcreate_serializedbdictcollection(False)\n if (sdictcollection == None):\n # nothing to do\n #print \"ATTN: no sitecollection found for object.\"\n return\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 2\"\n # ok we have some that potentially need save/update\n alldicts = sdictcollection.get_alldicts()\n for sdictkey, sdict in alldicts.iteritems():\n # check if this has changed and so needs updating\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 3 with {0}.\".format(sdictkey)\n if (sdict.get_haschanged()):\n # it has changed, get serialized string representation of the field to save\n serializedstring = sdict.get_serializedstr()\n # ok now we want to SAVE it to our attribute/field of this model\n # the internal attribute name for this field is the dictionary key itself\n attributename = sdictkey\n setattr(self,attributename,serializedstring)\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 4 with {0} and {1} and {2}.\".format(sdictkey,attributename,serializedstring)\n # clear haschanged flag\n sdict.set_haschanged(False)",
"def updateFields(self):\n super(AdminRulesForm, self).updateFields()\n self.fields['improved_templates'].widgetFactory = CheckBoxFieldWidget\n self.fields['iframe_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['js_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['inline_images'].widgetFactory = SingleCheckBoxFieldWidget",
"def updateTreeValues ( self, feature_column, feature_value, node_type, nodes, children = [ ] ):\n self.feature_column = feature_column\n self.feature_value = feature_value\n self.node_type = node_type\n self.nodes = nodes\n self.children = children\n # End updateTreeValues()",
"def test_update(self):\n doc_fields = document_fields.DocumentFields({\n 'foo@': 'bar',\n })\n self.assertEquals('bar', doc_fields['foo'])\n doc_fields.update({\n 'foo@': 'bbq',\n })\n self.assertEquals('bbq', doc_fields['foo'])",
"def upgrade_markup_controlpanel_settings(context):\n # get the old site properties\n portal_properties = getToolByName(context, \"portal_properties\")\n site_properties = portal_properties.site_properties\n # get the new registry\n registry = getUtility(IRegistry)\n # XXX: Somehow this code is executed for old migration steps as well\n # ( < Plone 4 ) and breaks because there is no registry. Looking up the\n # registry interfaces with 'check=False' will not work, because it will\n # return a settings object and then fail when we try to access the\n # attributes.\n try:\n settings = registry.forInterface(\n IMarkupSchema,\n prefix='plone',\n )\n except KeyError:\n settings = False\n if settings:\n settings.default_type = site_properties.default_contenttype\n\n forbidden_types = site_properties.getProperty('forbidden_contenttypes')\n forbidden_types = list(forbidden_types) if forbidden_types else []\n\n portal_transforms = getToolByName(context, 'portal_transforms')\n allowable_types = portal_transforms.listAvailableTextInputs()\n\n settings.allowed_types = tuple([\n _type for _type in allowable_types\n if _type not in forbidden_types\n and _type not in 'text/x-plone-outputfilters-html' # removed, as in plone.app.vocabularies.types # noqa\n ])"
] | [
"0.7209946",
"0.5625204",
"0.55950165",
"0.5523595",
"0.54563826",
"0.5431626",
"0.53579104",
"0.5331359",
"0.5295891",
"0.5295891",
"0.5295891",
"0.5295891",
"0.5295891",
"0.5272213",
"0.5268226",
"0.52631927",
"0.5263186",
"0.52535546",
"0.52160764",
"0.52101666",
"0.51781535",
"0.5172809",
"0.51359737",
"0.5115901",
"0.5110039",
"0.5084518",
"0.50826937",
"0.5080223",
"0.50757694",
"0.5074535"
] | 0.72390586 | 0 |
Helper to trim off any enclosing parentheses | def __trim(self, text):
if isinstance(text, str) and \
text[0] == "(" and \
text[-1] == ")":
text = text[1:-1]
return text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def removeOuterParentheses(self, S):\n _open, _close = \"(\", \")\"\n oc, cc = 0, 0\n part, res = \"\", \"\"\n\n for i, p in enumerate(S):\n if p == _open:\n oc += 1\n elif p == _close:\n cc += 1\n\n part += p\n\n if oc == cc:\n res += part[1:-1]\n part = \"\"\n\n return res",
"def strip_all_unbalanced_parens(s):\n c = strip_unbalanced_parens(s, '()')\n c = strip_unbalanced_parens(c, '<>')\n c = strip_unbalanced_parens(c, '[]')\n c = strip_unbalanced_parens(c, '{}')\n return c",
"def removeParentheses(text):\n\t#print text\n\tno_parentheses = re.sub(r'\\s?\\([^)]*\\)', '', text)\n\treturn no_parentheses",
"def _remove_between_square_brackets(text):\n return re.sub('\\[[^]]*\\]', '', text)",
"def strip_unbalanced_parens(s, parens='()'):\n start, end = parens\n if not start in s and not end in s:\n return s\n\n unbalanced = []\n unbalanced_append = unbalanced.append\n\n stack = []\n stack_append = stack.append\n stack_pop = stack.pop\n\n for i, c in enumerate(s):\n if c == start:\n stack_append((i, c,))\n elif c == end:\n try:\n stack_pop()\n except IndexError:\n unbalanced_append((i, c,))\n\n unbalanced.extend(stack)\n pos_to_del = set([i for i, c in unbalanced])\n cleaned = [c if i not in pos_to_del else ' ' for i, c in enumerate(s)]\n return type(s)('').join(cleaned)",
"def strip_brackets(text: str) -> str:\n\t\tpieces = [\n\t\t\t\t('(', ')'), ('[', ']'), ('[', ']'), ('{', '}'), ('<', '>'),\n\t\t\t\t(Chars.lshell, Chars.rshell), (Chars.langle, Chars.rangle),\n\t\t\t\t(Chars.ldparen, Chars.rdparen), (Chars.ldbracket, Chars.rdbracket), (Chars.ldangle, Chars.rdangle), (Chars.ldshell, Chars.rdshell)\n\t\t\t]\n\t\treturn StringTools.strip_paired(text, pieces)",
"def stripBrackets(b):\n\n while b.startswith(b\"> \"):\n b = b[2:]\n return b",
"def strip_brackets(text) -> str:\n if text is None:\n return \"\"\n\n if text.startswith(\"[\") and text.endswith(\"]\"):\n return text[1:len(text) - 1]\n\n return text",
"def strip_brackets_and_quotes(text: str) -> str:\n\t\tpieces = [\n\t\t\t\t('(', ')'), ('[', ']'), ('[', ']'), ('{', '}'), ('<', '>'),\n\t\t\t\t(Chars.lshell, Chars.rshell), (Chars.langle, Chars.rangle),\n\t\t\t\t('`', '`'),\n\t\t\t\t(Chars.lsq, Chars.rsq), (Chars.ldq, Chars.rdq), (\"'\", \"'\"), ('\"', '\"'),\n\t\t\t\t(Chars.ldparen, Chars.rdparen), (Chars.ldbracket, Chars.rdbracket), (Chars.ldangle, Chars.rdangle), (Chars.ldshell, Chars.rdshell)\n\t\t\t]\n\t\treturn StringTools.strip_paired(text, pieces)",
"def _remove_bracket(e_title: str) -> str:\n stack = []\n if e_title[0] == \"{\" and e_title[-1] == \"}\":\n for i, ch in enumerate(e_title):\n if ch == \"{\" and (i == 0 or (i > 0 and e_title[i - 1] != \"//\")):\n stack.append((i, ch))\n elif ch == \"}\" and e_title[i - 1] != \"//\":\n index, ch = stack.pop()\n if index == 0:\n if i == len(e_title) - 1:\n return e_title[1:-1]\n break\n return e_title",
"def _strip_braces(self, val):\n logger.debug('Strip braces')\n val = val.strip()\n if val.startswith('{') and val.endswith('}') and self._full_span(val):\n return val[1:-1]\n return val",
"def _despace(statement):\n return re.sub(r' +', ' ', statement)",
"def __clean_string(cls, text):\n if text.startswith(\"(\"):\n text = text[1:]\n if text.endswith(\")\"):\n text = text[:-1]\n if text.endswith(\",\"):\n text = text[:-1]\n if len(text) > 2 and cls.__is_quote(text[0]) and \\\n cls.__is_quote(text[-1]):\n text = text[1:-1]\n return text",
"def remove_parentheses(sequence):\n\n first_opener_idx_assigned = False\n started = False\n counter = 0\n\n for idx, e in enumerate(sequence):\n if e == '(':\n if started == False:\n started = True\n counter = counter + 1\n elif e == ')':\n if started == False:\n raise ValueError(\"remove_parentheses(sequence):\\\n missing correcponding parentheses; ')' without '('\")\n counter = counter - 1\n\n if started == True:\n if first_opener_idx_assigned == False:\n first_opener_idx = idx\n first_opener_idx_assigned = True\n if counter == 0:\n sequence.pop(idx)\n\n if idx < len(sequence):\n element_after_last_closer = sequence[idx]\n else:\n element_after_last_closer = None\n\n sequence.pop(first_opener_idx)\n return element_after_last_closer\n return None",
"def deparenthasize(lst):\n newlst = []\n \n for i in range(0, len(lst)):\n\n word = lst[i]\n #print \"Before: %s\" % word\n done = False\n\n while not done:\n if word == \"(\":\n word = \"\" \n elif len(word) > 0 and word[0] == \"(\":\n word = word[1:]\n if len(word) > 0 and word[-1] == \")\":\n word = word[:-1]\n\n if len(word) > 0:\n if word[0] == \"(\" or word[-1] == \")\":\n done = False\n else:\n done = True\n else:\n done = True\n\n if not word == \"\":\n newlst.append(word)\n\n #print \"After: %s\" % word\n\n return newlst",
"def clean_all_brackets(text):\n if \"[\" in text and \"]\" in text:\n text = delete_first_brackets(text)\n return clean_all_brackets(text)\n else:\n return text",
"def strip_cell(cell):\n\tcell = re.sub(r'\\[[^]]*\\]', '', cell)\n\tcell = re.sub(r'\\s+', ' ', cell)\n\tcell = cell.strip()\n\treturn cell",
"def truncate(s):\n in_str = False\n bb = 0\n for i, c in enumerate(s):\n if c == '(' and not in_str:\n bb += 1\n elif c == ')' and not in_str:\n bb -= 1\n if bb == 0:\n return s[:i+1]\n elif c == '\\\"':\n in_str = not in_str\n raise ValueError('Insufficient close brackets in ' + repr(s))",
"def remove_punc(self, r):\n c = ''\n useless = [',', '+', '-', '*', '/', '=', ',', '.']\n for d in r:\n if d not in useless:\n c += d\n brackets = ['(', ')', '[', ']', '{', '}', '<', '>']\n d = str(c)\n c = ''\n brac_cnt = 0\n for i in d:\n if i == '(' or i == '[' or i in '{':\n brac_cnt += 1\n if i == ')' or i == ']' or i == '}':\n brac_cnt -= 1\n if i not in brackets:\n if brac_cnt <= 0:\n c += i\n return c",
"def clean(val):\n\n val = re.sub(r'/s+', r'/s', val)\n return val.strip()",
"def sans_parens(s):\n s = prep_simple_str(s)\n \n total = s[0]\n \n for c in s[1:]:\n if c == \")\":\n return total\n elif c == \"*\":\n op = lambda a,b: a * b\n elif c == \"+\":\n op = lambda a,b: a + b\n else:\n total = op(total, c)\n return total",
"def clean_newick_string(self, newick_str):\n str_buff = []\n final_bracket, cur_len = 0, 0\n for data in self.separate_square_comments(newick_str):\n if data[0] != '[':\n clean_data = ''.join(data.split())\n str_buff.append(clean_data)\n brck_ind = clean_data.rfind(')')\n if brck_ind != -1:\n final_bracket = cur_len + brck_ind\n cur_len += len(clean_data)\n else:\n str_buff.append(data)\n cur_len += len(data)\n return ''.join(str_buff), final_bracket",
"def remove_extra_middle_spaces(x):\n\n return \" \".join(x.split())",
"def strip_space():\n pass",
"def remove_ellipses(data: pd.Series) -> pd.Series:\n return data.replace(r'\\(\\.+\\)', value='', regex=True)",
"def inner_strip(s):\n\n if strip_string:\n return s.strip(strip_string)\n return s.strip()",
"def clear_stop(stop):\n try:\n stop = stop[:stop.index('(')]\n except:\n pass\n return stop.strip()",
"def _StripWS(s):\r\n return re.sub('\\s+', '', s)",
"def _StripWS(s):\r\n return re.sub('\\s+', '', s)",
"def space_parantheses(input_string):\n\n char_list = []\n\n for i in range(len(input_string)):\n if (input_string[i] == \"(\") and (i != 0) and (input_string[i - 1] != \" \"):\n char_list.append(\" \")\n char_list.append(\"(\")\n\n elif (input_string[i] == \"[\") and (i != 0) and (input_string[i - 1] != \" \"):\n char_list.append(\" \")\n char_list.append(\"[\")\n\n elif (input_string[i] == \"{\") and (i != 0) and (input_string[i - 1] != \" \"):\n char_list.append(\" \")\n char_list.append(\"{\")\n\n elif (input_string[i] == \"<\") and (i != 0) and (input_string[i - 1] != \" \"):\n char_list.append(\" \")\n char_list.append(\"<\")\n\n else:\n char_list.append(input_string[i])\n\n return \"\".join(char_list)"
] | [
"0.73211086",
"0.7262523",
"0.7182982",
"0.710938",
"0.68146855",
"0.67114186",
"0.66600233",
"0.6646801",
"0.66210675",
"0.63920456",
"0.6388178",
"0.6297051",
"0.6198644",
"0.61323136",
"0.6124953",
"0.6042619",
"0.6026883",
"0.59085965",
"0.58875585",
"0.58423495",
"0.58147883",
"0.5778781",
"0.57781047",
"0.57478637",
"0.57318085",
"0.5687377",
"0.5687086",
"0.56776106",
"0.56776106",
"0.5665178"
] | 0.7543117 | 0 |
Generate a barcode for the given UUID | def barcode(self, uuid):
barcode = code128.Code128(str(uuid), barWidth=1, barHeight=20)
barcode.drawOn(self.canvas, self.lastx, self.lasty)
self.lasty = self.lasty - 20
self.y = self.lasty | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generate_barcode_upce(self):\n pass",
"def gen_uuid() -> str:\n return str(uuid4())",
"def gen_uuid():\n return str( uuid.uuid4() )",
"def gen_uuid():\n return str( uuid.uuid4() )",
"def gen_uuid():\n return str(uuid.uuid4())",
"def test_generate_barcode_qr_code(self):\n pass",
"def _generate_uuid():\n return str(uuid.uuid4())",
"def test_generate_barcode_upca(self):\n pass",
"def generate_anki_guid() -> str:\n\n def base62(num: int, extra: str = \"\") -> str:\n s = string\n table = s.ascii_letters + s.digits + extra\n buf = \"\"\n while num:\n num, i = divmod(num, len(table))\n buf = table[i] + buf\n return buf\n\n _base91_extra_chars = \"!#$%&()*+,-./:;<=>?@[]^_`{|}~\"\n\n def base91(num: int) -> str:\n # all printable characters minus quotes, backslash and separators\n return base62(num, _base91_extra_chars)\n\n return base91(random.randint(0, 2 ** 64 - 1))",
"def test_generate_barcode_ean13(self):\n pass",
"def test_generate_barcode_ean8(self):\n pass",
"def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')",
"def generate_product_number():\n return str(uuid.uuid4())",
"def default_code():\n return uuid.uuid4().hex",
"def new_barcode(num_digits=5, chars=string.digits+string.uppercase):\n return 'FLIM-'+(''.join([random.choice(chars) for _ in xrange(num_digits)]))",
"def _generate_uuid(self):\n\n return uuid.uuid4()",
"def uuid():\n from dallinger.experiment import Experiment\n\n click.echo(Experiment.make_uuid())",
"def generate_uuid():\n return uuid.uuid4().hex",
"def generateUUID(): # pylint: disable=C0103\r\n return str(uuid.uuid4())",
"def generate_uuid():\n return f'{uuid.uuid1()}'",
"def uuid():\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(32))",
"def generate_uuid():\n return uuid.uuid4()",
"def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()",
"def __generate_random_string():\n return uuid4().hex[:6].upper()",
"def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))",
"def unique_id() -> bytes:",
"def uuid(self, value):\n self.unique_id = UUID(str(value)).hex",
"def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))",
"def __generate_pin(cls) -> str:\n return str(randbelow(10 ** cls.PIN_DIGITS)).zfill(cls.PIN_DIGITS)",
"def get_uuid():\n return str(uuid4())"
] | [
"0.7014342",
"0.6803592",
"0.6756756",
"0.6756756",
"0.67063415",
"0.66737944",
"0.6648601",
"0.6631629",
"0.65757394",
"0.6569812",
"0.6564768",
"0.6560364",
"0.6551317",
"0.65252805",
"0.6519483",
"0.64701307",
"0.6449104",
"0.6431399",
"0.63647515",
"0.63552344",
"0.6345105",
"0.63425845",
"0.63385576",
"0.6328664",
"0.6320158",
"0.6286092",
"0.6272557",
"0.6255411",
"0.6252784",
"0.62522554"
] | 0.70955324 | 0 |
Writes one character on canvas | def writechar(self, char=" "):
font=self.selectfont(char)
t = self.canvas.beginText(self.x, self.y)
t.setFont(font, self.fontsize)
t.setFillGray(self.gray)
t.textOut(char)
self.canvas.drawText(t)
return t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writechar(self, char: int, /) -> None:",
"def point(self, x, y, char):\n assert len(char) == 1\n assert x >= 0\n assert x < self.cols\n assert y >= 0\n assert y < self.lines\n\n self.canvas[y][x] = char",
"def draw(self, x, y, char=None, fg=(255, 255, 255), bg=None):\n self.console.draw_char(x, y, char, fg, bg)",
"def _put_chr_at(self, char, row, col, color, adjustment_x=.19, adjustment_y=.19):\n self._goto_piece_xy(row, col, adjustment_x, adjustment_y)\n self.pen.color(color)\n self.pen.write(char, font=(\"Courier\", round(self.square_side_size * .7),\n \"normal\"))",
"def write_char(self, char, token, string_index=None,\n set_cursor_position=False, z_index=False):\n assert len(char) == 1\n\n char_obj = Char(char, token, z_index)\n char_width = char_obj.get_width()\n\n # In case there is no more place left at this line, go first to the\n # following line. (Also in case of double-width characters.)\n if self._x + char_width > self.size.columns:\n self._y += 1\n self._x = 0\n\n insert_pos = self._y, self._x # XXX: make a Point of this?\n\n if string_index is not None:\n self._cursor_mappings[string_index] = insert_pos\n\n if set_cursor_position:\n self.cursor_position = Point(y=self._y, x=self._x)\n\n # Insertion of newline\n if char == '\\n':\n self._y += 1\n self._x = 0\n self._line_number += 1\n\n # Insertion of a 'visible' character.\n else:\n if char_obj.z_index >= self._buffer[self._y][self._x].z_index:\n self._buffer[self._y][self._x] = char_obj\n\n # When we have a double width character, store this byte in the\n # second cell. So that if this character gets deleted afterwarsd,\n # the ``output_screen_diff`` will notice that this byte is also\n # gone and redraw both cells.\n if char_width > 1:\n self._buffer[self._y][self._x+1] = Char(six.unichr(0))\n\n # Move position\n self._x += char_width\n\n return insert_pos",
"def drawChar(self, char, x, y, color=Config.FONT_COLOR):\n\n pixels, width, height = char.pixels, char.width, char.height\n pixel_size = char.pixel_size\n dx, dy = 0, 0\n\n # Loops though the character's list that specifies where to draw\n for row in range(char.height):\n\n for column in range(char.width):\n\n if pixels[row][column]: # If there is a 1 at the specified index in the char, draw a pixel(s)\n self.draw(x + dx, y + dy, x + dx + pixel_size, y + dy + pixel_size, color)\n\n dx += pixel_size + 1 # Increase the horizontal offset\n\n dy += pixel_size + 1 # Increase the vertical offset\n dx = 0 # Reset the horizontal offset",
"def write(self, text, x=None, y=None):\n\n # TODO - change this so that the cursor moves.\n if x is None:\n x = self.cursorx\n if y is None:\n y = self.cursory\n\n self._strDirty = True\n startIndex = self._convertTupleIndexsToSingleIndex(x, y)\n for i in range(startIndex, startIndex + len(text)):\n cx, cy = self._convertSingleIndexToTupleIndexes(i % self.area)\n if not self.isOnCanvas(cx, cy):\n break\n\n self._chars[cx][cy] = text[i - startIndex]\n self._fginfo[cx][cy] = self._fg\n self._bginfo[cx][cy] = self._bg\n\n self.cursor = self._convertSingleIndexToTupleIndexes((startIndex + len(text)) % self.area)",
"def draw_char(\n self, char, x, y, framebuffer, color, size=1\n ): # pylint: disable=too-many-arguments\n size = max(size, 1)\n # Don't draw the character if it will be clipped off the visible area.\n # if x < -self.font_width or x >= framebuffer.width or \\\n # y < -self.font_height or y >= framebuffer.height:\n # return\n # Go through each column of the character.\n for char_x in range(self.font_width):\n # Grab the byte for the current column of font data.\n self._font.seek(2 + (ord(char) * self.font_width) + char_x)\n try:\n line = struct.unpack(\"B\", self._font.read(1))[0]\n except RuntimeError:\n continue # maybe character isnt there? go to next\n # Go through each row in the column byte.\n for char_y in range(self.font_height):\n # Draw a pixel for each bit that's flipped on.\n if (line >> char_y) & 0x1:\n framebuffer.fill_rect(\n x + char_x * size, y + char_y * size, size, size, color\n )",
"def draw_char( self, x, y , ch ):\n\t\tcharBuf = None\n\t\ti = 0\n\t\tj = 0\n\t\tk = 0\n\t\tvar1 = 0\n\t\ttextWidth = 0\n\t\ttextHeight = 0\n\n\t\t#print( x,y,ch )\n\t\tcharBuf, charWidth, charHeight = get_character( ch )\n\t\tself.fill_rect( (x,y),charWidth,charHeight, self._text_bg )\n\n\t\t# No character Drawing\n\t\tif not charBuf:\n\t\t\treturn charWidth, charHeight\n\n\t\t# Draw the character\n\t\t#\tCodification for A = 0x7C,0x12,0x11,0x12,0x7C,0x00\n\t\t#\t0x7C .11111..\n\t\t#\t0x12 ...1..1.\n\t\t#\t0x11 ...1...1\n\t\t#\t0x12 ...1..1.\n\t\t#\t0x7C .11111..\n\t\t#\t0x00 ........\n\t\tfor x_pos in range(len(charBuf)):\n\t\t\tbits = charBuf[x_pos]\n\t\t\tfor y_pos in range(8):\n\t\t\t\ta_bit = (bits & (1<<y_pos))>0\n\t\t\t\t# pixel position & pixel size\n\t\t\t\txstart = x+(x_pos*self.text_size)\n\t\t\t\tystart = y+(y_pos*self.text_size)\n\t\t\t\tif a_bit:\n\t\t\t\t\tif self.text_size==1:\n\t\t\t\t\t\tself.pixel( (xstart,ystart), self.text_fg )\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.fill_rect( (xstart,ystart), self.text_size, self.text_size, self.text_fg )\n\n\t\t# return the drawing size\n\t\treturn charWidth*self.text_size, charHeight*self.text_size",
"def write_char(self, char=' '):\n integer = ord(char)\n self.instruction(integer, True)",
"def draw_char(self, x, y, c, color, bg, aa=False, font_name=font.default_font, font_scale=1):\n assert font_scale >= 1, \"font_scale must be >= 1\"\n f = font.fonts[font_name]\n fh = f['height']\n FONT = f['data']\n\n c = ord(c) # make it the int value\n if c < f['bounds'][0] or c > f['bounds'][1]:\n c_data = f['undef']\n else:\n c_data = FONT[c - f['bounds'][0]]\n\n fw = len(c_data)\n for i in range(fw + f['sep']):\n xPos = x + (i * font_scale)\n if ((xPos < self.width) and (xPos + fw * font_scale - 1) >= 0):\n if i >= fw:\n line = 0\n else:\n line = FONT[c][i]\n for j in range(fh):\n yPos = y + (j * font_scale)\n if ((yPos < self.height) and\n (yPos + fh * font_scale - 1) >= 0):\n if line & 0x1:\n if font_scale == 1:\n self.set(xPos, yPos, color)\n else:\n self.draw_rect_filled(xPos, yPos, font_scale, font_scale, color, aa)\n elif bg != color and bg is not None:\n if font_scale == 1:\n self.set(xPos, yPos, bg)\n else:\n self.draw_rect_filled(xPos, yPos, font_scale, font_scale, bg, aa)\n line >>= 1\n return fw + f['sep']",
"def Draw(self):\n print ( 10*\"*\")\n print (\"Player \" + self.character + \" says:\")\n print (\"It's a Draw\")\n print ( 10*\"*\")",
"def write_at_pos(self, y, x, char_obj):\n # Add char to buffer\n if x < self.size.columns:\n if char_obj.z_index >= self._buffer[y][x].z_index:\n self._buffer[y][x] = char_obj",
"def draw(canvas):\n global n\n global message\n canvas.draw_text(message, [WIDTH // 2, HEIGTH // 2], 35, 'Gray')\n canvas.draw_text(display(), [250, 20], 25, 'Gray')",
"def putchar(self, col, row, char, color=GREEN):\n for j in range(FONT_HEIGHT - 5):\n v = self.font[ord(char)][3 + j]\n for i in range(FONT_WIDTH):\n if v & (1 << (7 - i)):\n self.putpixel(col + i, row + j, color)\n else:\n self.putpixel(col + i, row + j, BLACK)",
"def addch(self, posy, posx, character, color_pair):\r\n if posy < 0 or posy > self.height - 1:\r\n return\r\n if posx < 0 or posx > self.width - 1:\r\n return\r\n if posx == self.width - 1 and posy == self.height - 1:\r\n return\r\n self.win.addch(posy, posx, character, color_pair)",
"def def_char(self, offset, data):\n self.send((\"\\x1b\\x26\\x01%c%c\\x05\") % ((offset&0xff), (offset&0xff)))\n time.sleep(0.01)\n for i in data:\n self.send((\"%c\")%i)",
"def write(self, x, y, text, fg, bg):\n brush = self.get_brush(fg, bg)\n try:\n self.win.addstr(y, x, text, brush)\n except curses.error:\n if x == self.width - 1 and y == self.height - 1:\n pass",
"def set_character(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'G'",
"def fill(self, char=' '):\n if char is not None:\n char = str(char)\n if len(char) != 1:\n raise PyTextCanvasException('char must be a single character or None')\n\n for x in range(self.width):\n for y in range(self.height):\n self._chars[x][y] = char\n self._fginfo[x][y] = self._fg\n self._bginfo[x][y] = self._bg\n self._strDirty = True",
"def create_char(self, location, bitmap):\n assert 0 <= location <= 7, 'Only locations 0-7 are valid.'\n assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'\n\n # Store previous position\n pos = self.cursor_pos\n\n # Write character to CGRAM\n self.command(self.LCD_SETCGRAMADDR | location << 3)\n for row in bitmap:\n self._send(row, self.RS_DATA)\n\n # Restore cursor pos\n self.cursor_pos = pos",
"def _insChar(self, char, pos, color):\n char, vertices, glyph = self._extractGlyph(char, glm.vec4(color))\n if not self.text:\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n self.colors.insert(pos, [char, None])\n else:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.colors.insert(pos, [char, color])\n self.text += char\n else:\n self.logger.debug(\"Inserting %r at %d\" % (char, pos))\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n # Arrange vertices\n if pos < len(self.text):\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n color = None\n else:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices, vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n\n self.colors.insert(pos, [char, color])\n if pos < len(self.text):\n self.text = self.text[:pos] + char + self.text[pos:]\n self._updateGlyphs(pos, char)\n else:\n self.text += char",
"def output_char(char):\n # If the last printed character has the same token, it also has the\n # same style, so we don't output it.\n if last_char[0] and last_char[0].token == char.token:\n write(char.char)\n else:\n style = get_style_for_token(char.token)\n\n if style:\n output.set_attributes(style['color'], style['bgcolor'],\n bold=style.get('bold', False),\n underline=style.get('underline', False))\n\n # If we print something with a background color, remember that.\n background_turned_on[0] = bool(style['bgcolor'])\n else:\n # Reset previous style and output.\n output.reset_attributes()\n\n write(char.char)\n\n last_char[0] = char",
"def __draw_player(self, state):\n self.arena_win.addch(state.player.position[1] + 1, state.player.position[0] + 1, '@')",
"def draw(self, canvas):\n canvas.draw_text(\"Score: \" + str(self.__score), self.__pos, 25 , 'white', 'monospace')",
"def draw(self, data):\n data = data.translate(\n self.g1_charset if self.charset else self.g0_charset)\n\n for char in data:\n char_width = wcwidth(char)\n if (self.cursor.x == self.columns and char_width >= 1) \\\n or (self.cursor.x == self.columns - 1 and char_width >= 2):\n if mo.DECAWM in self.mode:\n last = self.buffer[self.cursor.y][self.columns - 1]\n self.buffer[self.cursor.y][self.columns - 1] = \\\n last._replace(linefeed=True)\n self.dirty.add(self.cursor.y)\n self.carriage_return()\n self.linefeed()\n elif char_width > 0:\n self.cursor.x -= char_width\n\n if mo.IRM in self.mode and char_width > 0:\n self.insert_characters(char_width)\n\n line = self.buffer[self.cursor.y]\n if char_width == 1:\n if is_windows and self.cursor.x == self.columns - 1:\n # always put a linefeed marker when cursor is at the last column\n line[self.cursor.x] = self.cursor.attrs._replace(data=char, linefeed=True)\n else:\n line[self.cursor.x] = self.cursor.attrs._replace(data=char)\n\n elif char_width == 2:\n line[self.cursor.x] = self.cursor.attrs._replace(data=char)\n if is_windows and self.cursor.x == self.columns - 2:\n # always put a linefeed marker when the next char is at the last column\n line[self.cursor.x + 1] = self.cursor.attrs._replace(data=\"\", linefeed=True)\n elif self.cursor.x + 1 < self.columns:\n line[self.cursor.x + 1] = self.cursor.attrs._replace(data=\"\")\n\n elif char_width == 0 and unicodedata.combining(char):\n # unfornately, sublime text doesn't render decomposed double char correctly\n pos = None\n for (row, col) in [\n (self.cursor.y, self.cursor.x),\n (self.cursor.y - 1, self.columns)]:\n if row < 0:\n continue\n if col >= 2:\n last = line[col - 2]\n if wcswidth(last.data) >= 2:\n pos = (row, col - 2)\n break\n if col >= 1:\n last = line[col - 1]\n pos = (row, col - 1)\n break\n\n if pos:\n normalized = unicodedata.normalize(\"NFC\", last.data + char)\n self.buffer[pos[0]][pos[1]] = last._replace(data=normalized)\n self.dirty.add(pos[0])\n else:\n break\n\n if char_width > 0:\n self.cursor.x = min(self.cursor.x + char_width, self.columns)\n\n self.dirty.add(self.cursor.y)",
"def write(self, x, y, msg, fg=(255, 255, 255), bg=None):\n self.console.draw_str(x, y, msg, fg, bg)",
"def setChar(self, char):\n self.label.setText(str(char))",
"def create_char(self, location, bitmap):\n if not (0 <= location <= 7):\n raise ValueError('Only locations 0-7 are valid.')\n if len(bitmap) != 8:\n raise ValueError('Bitmap should have exactly 8 rows.')\n\n # Store previous position\n pos = self.cursor_pos\n\n # Write character to CGRAM\n self.command(_LCD_SETCGRAMADDR | location << 3)\n for row in bitmap:\n self._send(row, _RS_DATA)\n\n # Restore cursor pos\n self.cursor_pos = pos",
"def write(self, chars, output, format='png'):\n im = self.generate_image(chars)\n return im.save(output, format=format)"
] | [
"0.73790383",
"0.7066766",
"0.68195283",
"0.6806944",
"0.67353094",
"0.67342776",
"0.6577753",
"0.657208",
"0.65341264",
"0.63544863",
"0.62962973",
"0.6283854",
"0.6271442",
"0.62612313",
"0.6221333",
"0.61874944",
"0.6184578",
"0.61663246",
"0.61465734",
"0.6140425",
"0.6032455",
"0.6022973",
"0.6019244",
"0.6013984",
"0.59807765",
"0.596505",
"0.595951",
"0.593138",
"0.5910044",
"0.58963126"
] | 0.7528636 | 0 |
Select font according to the input character | def selectfont(self, char):
charcode = ord(char)
for font in fontchecksequence:
for fontrange in fontmapping[font]:
if charcode in xrange(fontrange[0], fontrange[1]):
return font
return "Helvetica" # fallback, if no thirdparty font is installed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def selectFont():\n font,ok = QtGui.QFontDialog.getFont()\n if ok:\n return font\n else:\n return None",
"def get_font(self, option):\n return get_font(option=option)",
"def comdlg32_ChooseFont(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpcf\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def setDislinFont(font='default'):\n fontdict[font]()",
"def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))",
"def font(self, font='a'):\n if font not in self.__class__.__fontMap.keys():\n raise ValueError('font must be \\'a\\', \\'b\\', \\'c\\'')\n elif self._usePrintMode:\n self._textFont = font\n self._updatePrintMode()\n else:\n self._write(self.__class__.__ESC + 'M' + self.__class__.__fontMap[font])",
"def font(self):\n return self[\"font\"]",
"def font(self):\n return self[\"font\"]",
"def font(self):\n return self[\"font\"]",
"def shell_font_changed(self, font):\n self.set_font(font)",
"def askopenfont(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')",
"def get_font_dict(f):\n return tk_font.Font(font=f).actual()",
"def set_font(self, font):\n\tself.m_font = font",
"def set_font(self, font='A'):\n upper = font.upper()\n if upper == 'B':\n self._set_print_mode(self.FONT_MASK)\n elif upper == 'A':\n self._unset_print_mode(self.FONT_MASK)\n else:\n self._unset_print_mode(self.FONT_MASK)",
"def named_font(self, point):\n return Font.create(self.name, point * self.scale)",
"def set_font(self, font):\n\ttry:\n\t self.m_gdfont = self._fonts[font.lower()]\n\t self.m_font = font\n\texcept KeyError:\n\t raise ValueError, 'Illegal font name.'",
"def get_named_font(*a, **kw):\n return get_named_font(*a, **kw)",
"def get_text_font ( self, object ):\n if self._is_selected( object ):\n return self.selected_text_font\n return self.text_font",
"def fontDialog(*args, FontList: bool=True, scalable: bool=True, **kwargs)->AnyStr:\n pass",
"def SetFont(*args):\n return _gdi_.GraphicsContext_SetFont(*args)",
"def SetFont(*args, **kwargs):\n return _gdi_.PseudoDC_SetFont(*args, **kwargs)",
"def font(self):\n\treturn self.m_font",
"def font(self):\n\treturn self.m_font",
"def setFont(font='default',hardware=1):\n if font == 'default' and hardware:\n setHardwareFont()\n return\n currfmt = getFileFormat()\n if isPostscript(currfmt):\n setPSFont(font)\n elif isWMF(currfmt):\n setTTFont(font)\n else:\n setDislinFont(font)",
"def select_character(self):\n pass",
"def SetFont(*args, **kwargs):\n return _gdi_.DC_SetFont(*args, **kwargs)",
"def set_font(self, font: str):\n self.font = font",
"def TextFieldOptionsAddFontName(builder, fontName):\n return AddFontName(builder, fontName)",
"def SetFont(self, font):\r\n \r\n wx.PyPanel.SetFont(self, font)\r\n\r\n selectedFont = wx.Font(font.GetPointSize(), font.GetFamily(),\r\n font.GetStyle(), wx.BOLD, font.GetUnderlined(),\r\n font.GetFaceName(), font.GetEncoding())\r\n\r\n self.SetNormalFont(font)\r\n self.SetSelectedFont(selectedFont)\r\n self.SetMeasuringFont(selectedFont)\r\n\r\n return True",
"def GetSelectedFont(self):\r\n\r\n return self._selected_font"
] | [
"0.71344846",
"0.6802239",
"0.6530128",
"0.6522377",
"0.6478366",
"0.64376915",
"0.6403912",
"0.6403912",
"0.6403912",
"0.63943005",
"0.63856626",
"0.63823503",
"0.63529533",
"0.6318125",
"0.6304441",
"0.62798864",
"0.6212459",
"0.62093073",
"0.6207095",
"0.61832416",
"0.6175667",
"0.6159466",
"0.6159466",
"0.6158796",
"0.6132895",
"0.60856974",
"0.60768",
"0.60708016",
"0.6067101",
"0.6058817"
] | 0.8041888 | 0 |
Function to draw check boxes; default number of boxes = 1 | def draw_check_boxes(self,
boxes=1,
completeline=0,
lines=0,
seek=0,
continuetext=0,
fontsize=15,
gray=0,
style="",
):
if not continuetext and not self.pagebegin:
self.resetx()
self.nextline()
self.pagebegin = 0
self.fontsize = fontsize
c = self.canvas
c.setLineWidth(0.90)
c.setStrokeGray(gray)
if style == "center":
self.x = self.width / 2
elif style == "right":
self.x = self.width - self.marginsides - self.fontsize
if seek > (self.width - (self.marginsides + self.fontsize)):
seek = 0
if (self.y - self.fontsize) < 40:
self.set_new_page()
#if continuetext == 1:
# self.y = self.y + self.fontsize
# self.x = self.lastx
#else:
# self.x = self.marginsides
if seek != 0:
self.x = self.x + seek
if fontsize == 0:
fontsize = self.fontsize
else:
self.fontsize = fontsize
if completeline == 1:
boxes = int(self.width / self.fontsize)
for i in range(boxes):
c.rect(self.x, self.y, self.fontsize, self.fontsize)
self.x = self.x + self.fontsize
if self.x > (self.width - (self.marginsides + self.fontsize)):
break
self.lastx = self.x
#self.x = self.marginsides
#self.y = self.y - self.fontsize
#if isdate:
# t = c.beginText(self.x, self.y)
# t.setFont(Helvetica, 13)
# t.setFillGray(0)
# t.textOut(" D D M M Y Y Y Y")
# c.drawText(t)
# self.y = self.y - fontsize
# self.lastx = t.getX()
# self.lasty = self.y
#if isdatetime:
# t = c.beginText(self.x, self.y)
# t.setFont(Helvetica, 12.5)
# t.setFillGray(0.4)
# t.textOut(" D D M M Y Y Y Y -H H :M M")
# c.drawText(t)
# self.y = self.y - fontsize
# self.lastx = t.getX()
# self.lasty = self.y
self.lastx = self.x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_checkboxes(self):\n self.create_y_crop_box()",
"def draw(self, surface):\n for box in self.checkboxes:\n box.draw(surface)",
"def _create_checkboxes(self) -> widgets.VBox:\n checkboxes = []\n pgons_checkboxes = []\n graph_checkboxes = []\n\n graphs = [\n (name, \"graphs\", layer_subtype, graph)\n for name, graph in self.viewer.layer_dict[\"graphs\"].items()\n for layer_subtype in [\"graph\", \"pgons\"]\n ]\n maps = [\n (name, \"maps\", \"map\", map_layer[\"map\"])\n for name, map_layer in self.viewer.layer_dict[\"maps\"].items()\n ]\n\n # Add checkboxes for all maps and graphs (including habitats)\n for idx, (layer_name, layer_type, layer_subtype, layer_dict) in enumerate(\n maps + graphs\n ):\n\n layout = widgets.Layout(padding=\"0px 0px 0px 0px\")\n\n # Indent habitat checkboxes\n if layer_type == \"graphs\":\n if layer_dict[\"is_habitat\"]:\n layout = widgets.Layout(padding=\"0px 0px 0px 25px\")\n\n checkbox = widgets.Checkbox(\n value=True,\n description=\"{} ({})\".format(layer_name, layer_subtype),\n disabled=False,\n indent=False,\n layout=layout,\n )\n checkbox.add_traits(\n layer_type=traitlets.Unicode().tag(sync=True),\n layer_subtype=traitlets.Unicode().tag(sync=True),\n layer_name=traitlets.Unicode().tag(sync=True),\n )\n checkbox.layer_type = layer_type\n checkbox.layer_name = layer_name\n checkbox.layer_subtype = layer_subtype\n\n checkbox.observe(self._switch_layer_visibility)\n\n if idx == 0:\n checkboxes.append(widgets.HTML(\"<b>Map Data</b>\"))\n\n checkboxes.append(checkbox)\n\n if layer_subtype == \"graph\":\n graph_checkboxes.append(checkbox)\n elif layer_subtype == \"pgons\":\n pgons_checkboxes.append(checkbox)\n\n # Add habitats header if last part of main graph\n if (\n layer_type == \"graphs\"\n and layer_subtype == \"pgons\"\n and not layer_dict[\"is_habitat\"]\n ):\n checkboxes.append(\n widgets.HTML(\n \"<b>Habitats in {}</b>\".format(layer_name),\n layout=widgets.Layout(padding=\"0px 0px 0px 25px\"),\n )\n )\n\n # Add horizontal rule if last map to separate from graphs\n if idx == len(maps) - 1:\n checkboxes.append(widgets.HTML(\"<hr/>\"))\n checkboxes.append(widgets.HTML(\"<b>Graph Data</b>\"))\n\n # Create button to toggle all polygons at once\n hide_pgon_button = widgets.ToggleButton(description=\"Toggle all polygons\")\n\n def toggle_all_pgons(change):\n try:\n if change[\"name\"] == \"value\":\n for box in pgons_checkboxes:\n box.value = change[\"new\"]\n except: # pylint: disable=bare-except\n self.logger.exception(\"Exception in view button callback on click.\")\n\n hide_pgon_button.observe(toggle_all_pgons)\n\n # Create button to toggle all graphs at once\n hide_graph_button = widgets.ToggleButton(description=\"Toggle all graphs\")\n\n def toggle_all_graphs(change):\n try:\n if change[\"name\"] == \"value\":\n for box in graph_checkboxes:\n box.value = change[\"new\"]\n except: # pylint: disable=bare-except\n self.logger.exception(\"Exception in view button callback on click.\")\n\n hide_graph_button.observe(toggle_all_graphs)\n\n checkboxes.append(widgets.HTML(\"<hr/>\"))\n buttons = widgets.HBox([hide_pgon_button, hide_graph_button])\n checkboxes.append(buttons)\n\n return widgets.VBox(checkboxes)",
"def checkBox(self, x, y):\n used = []\n for i in range(3):\n for j in range(3):\n cur = self.board[x+i][y+j]\n if cur not in used:\n if cur !=0:\n used += [cur]\n else:\n return False\n return True",
"def draw_bbox(n):\n return drawBbox(named(n))",
"def create_checkboxes(self, content, start, space):\n boxes = []\n size = (20, 20)\n for i,name in enumerate(content):\n rect = pg.Rect((start[0]+i*space[0],start[1]+i*space[1]), size)\n checked = self.state[name]\n boxes.append(CheckBox(name, rect, checked, self.get_result))\n return boxes",
"def test():\n labelFontSize = 10\n D = shapes.Drawing(450,650)\n cb = Crossbox()\n cb.x = 20\n cb.y = 530\n D.add(cb)\n D.add(shapes.String(cb.x+(cb.size/2),(cb.y-(1.2*labelFontSize)),\n cb.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n tb = Tickbox()\n tb.x = 170\n tb.y = 530\n D.add(tb)\n D.add(shapes.String(tb.x+(tb.size/2),(tb.y-(1.2*labelFontSize)),\n tb.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n\n yn = YesNo()\n yn.x = 320\n yn.y = 530\n D.add(yn)\n tempstring = yn.__class__.__name__ + '*'\n D.add(shapes.String(yn.x+(tb.size/2),(yn.y-(1.2*labelFontSize)),\n tempstring, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n D.add(shapes.String(130,6,\n \"(The 'YesNo' widget returns a tickbox if testvalue=1, and a crossbox if testvalue=0)\", fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize*0.75))\n\n\n ss = StopSign()\n ss.x = 20\n ss.y = 400\n D.add(ss)\n D.add(shapes.String(ss.x+(ss.size/2), ss.y-(1.2*labelFontSize),\n ss.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n ne = NoEntry()\n ne.x = 170\n ne.y = 400\n D.add(ne)\n D.add(shapes.String(ne.x+(ne.size/2),(ne.y-(1.2*labelFontSize)),\n ne.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n sf = SmileyFace()\n sf.x = 320\n sf.y = 400\n D.add(sf)\n D.add(shapes.String(sf.x+(sf.size/2),(sf.y-(1.2*labelFontSize)),\n sf.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n ds = DangerSign()\n ds.x = 20\n ds.y = 270\n D.add(ds)\n D.add(shapes.String(ds.x+(ds.size/2),(ds.y-(1.2*labelFontSize)),\n ds.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n na = NotAllowed()\n na.x = 170\n na.y = 270\n D.add(na)\n D.add(shapes.String(na.x+(na.size/2),(na.y-(1.2*labelFontSize)),\n na.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n ns = NoSmoking()\n ns.x = 320\n ns.y = 270\n D.add(ns)\n D.add(shapes.String(ns.x+(ns.size/2),(ns.y-(1.2*labelFontSize)),\n ns.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n a1 = ArrowOne()\n a1.x = 20\n a1.y = 140\n D.add(a1)\n D.add(shapes.String(a1.x+(a1.size/2),(a1.y-(1.2*labelFontSize)),\n a1.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n a2 = ArrowTwo()\n a2.x = 170\n a2.y = 140\n D.add(a2)\n D.add(shapes.String(a2.x+(a2.size/2),(a2.y-(1.2*labelFontSize)),\n a2.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n fd = FloppyDisk()\n fd.x = 320\n fd.y = 140\n D.add(fd)\n D.add(shapes.String(fd.x+(fd.size/2),(fd.y-(1.2*labelFontSize)),\n fd.__class__.__name__, fillColor=colors.black, textAnchor='middle',\n fontSize=labelFontSize))\n\n renderPDF.drawToFile(D, 'signsandsymbols.pdf', 'signsandsymbols.py')\n print('wrote file: signsandsymbols.pdf')",
"def crearChecks(self):\n check1 = Checkbutton(self.master, text=\"Tema 1\", variable=self.checkStatus1, command= self.updateCheck)\n check1.grid(row=7, column=1)\n check2 = Checkbutton(self.master, text=\"Tema 2\", variable=self.checkStatus2, command= self.updateCheck)\n check2.grid(row=8, column=1)\n check3 = Checkbutton(self.master, text=\"Tema 3\", variable=self.checkStatus3, command= self.updateCheck)\n check3.grid(row=9, column=1)",
"def getNumberChecked(self):\n return (self.checkboxScatterPlot.checkState() + self.checkboxDonutPlot.checkState() + self.checkboxAllTrees.checkState()) / 2",
"def num_check(xi, yi, li):\r\n lb_f = Label(window1, font=(\"Arial Bold\", 14), text='only integer number available')\r\n lb_f.place(x=xi, y=yi)\r\n list_cb[li].set(0)\r\n return",
"def adjust_nums_checked(self, checked):\n mw.checked_stats = []\n if checked:\n self.num_checked += 1\n elif not checked:\n self.num_checked -= 1\n for checkbox in self.checkboxes:\n if checkbox.isChecked():\n mw.checked_stats.append(checkbox)\n mw.bonuses[checkbox.text()] = 1\n for checkbox in self.checkboxes:\n if self.num_checked == 2:\n if not checkbox.isChecked():\n checkbox.setDisabled(True)\n if checkbox.text() in mw.bonuses.keys():\n del mw.bonuses[checkbox.text()]\n self.submit_btn.setEnabled(True)\n else:\n checkbox.setDisabled(False)\n self.submit_btn.setEnabled(False)",
"def autolabel(rects):",
"def draw_boxes(self, image, boxes):\n return draw_boxes(image, boxes, self.labels)",
"def GridCheck(Parent,DefaultSelected,Row,Column):\r\n dummyvar = IntVar()\r\n C = Checkbutton(Parent,var=dummyvar)\r\n if DefaultSelected == 1:\r\n C.select()\r\n C.grid(row=Row,column=Column)\r\n C.isChecked = dummyvar\r\n return C",
"def get_checkbox_coordinates():\n boxes = []\n current_y = CHECKBOX_TOP_Y_START\n for _ in range(NUM_CHECKBOXES):\n top = current_y\n bottom = top + CHECKBOX_HEIGHT - 1\n left = CHECKBOX_LEFT_X_START\n right = CHECKBOX_RIGHT_X_END\n boxes.append((left, right, bottom, top))\n current_y += CHECKBOX_INTERTOP_DISTANCE\n return boxes",
"def init_round_curve_checkbox(self):\n self.vars[\"round_corners\"] = BooleanVar(self.frame)\n self.buttons[\"chkbtn_round_corners\"] = Checkbutton(\n self.frame, text='round corners',\n var=self.vars[\"round_corners\"])\n self.buttons[\"chkbtn_round_corners\"].grid(row=6, column=0)",
"def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)",
"def DrawCheckMarkRect(*args, **kwargs):\n return _gdi_.DC_DrawCheckMarkRect(*args, **kwargs)",
"def enable_selection(self):\n n_t = 0\n n_t_t = 0\n if self.tree_ctrl is not None:\n n_t = self.tree_ctrl.GetCount()\n if self.tree_ctrl_theory is not None:\n n_t_t = self.tree_ctrl_theory.GetCount()\n if n_t + n_t_t > 0 and self.selection_cbox is not None:\n self.selection_cbox.Enable()\n else:\n self.selection_cbox.Disable()",
"def drawCheckerBoard(N=5, white=GLfloat_3(1, 1, 1), black=GLfloat_3(0, 0, 0)):\r\n glDisable(GL_LIGHTING)\r\n try:\r\n for x in range(-N, N):\r\n for y in range(-N, N):\r\n if (x + y) % 2 == 0:\r\n glColor3fv(white)\r\n else:\r\n glColor3fv(black)\r\n glRectf(x, y, x + 1, y + 1)\r\n finally:\r\n glEnable(GL_LIGHTING)",
"def DrawCheckMarkRect(*args, **kwargs):\n return _gdi_.PseudoDC_DrawCheckMarkRect(*args, **kwargs)",
"def checkbox_1D(image, checkbox, debug=False):\n \n # Collapse input image, currently onto X axis\n # Reshape to reflect collapse onto x axis\n vector = np.sum(image, axis=0)\n print('(checkbox_1D): Image collapsed into 1D vector.')\n print()\n \n # Calculate the checkbox half-width\n chw = (checkbox - 1) / 2\n\n \n # Calculate the image size\n xsize, ysize = image.shape[1], image.shape[0]\n \n # Calculate the x and y widths of checkbox region\n xwidth = xsize - checkbox + 1\n\n # If the checkbox size is not equal to both the X and Y sizes, \n # find the pixel with the brightest checkbox\n if checkbox != xsize and checkbox != ysize:\n xpeak = 0\n ypeak = 1\n sumpeak = 0\n for ii in xrange(xsize - checkbox):\n t = np.sum(vector[ii:ii+checkbox])\n if t > sumpeak:\n xpeak = ii + 1\n sumpeak = t\n\n print('(checkbox_1D): Checkbox not equal to xsize.')\n \n \n # If the checkbox size is equal to both the X and Y sizes\n if checkbox == xsize:\n xpeak = xsize / 2\n sumpeak = np.sum(vector, axis=None)\n \n print('(checkbox_1D): Checkbox equal to xsize.')\n \n # Print checkbox center and peak around centroid region\n\n # Find the checkbox region half-width in x and y\n xhw = xwidth / 2\n \n if xpeak < xhw or xpeak > xsize - xhw:\n print('(checkbox_1D): WARNING - Peak too close to edge of image.')\n \n \n # Debug messages\n if debug:\n print('(checkbox_1D): chw = ', chw)\n print('(checkbox_1D): xhw = ', xhw)\n print('(checkbox_1D): xsize = ', xsize)\n print('(checkbox_1D): xwidth = ', xwidth)\n print('(checkbox_1D): xpeak = ', xpeak)\n print('(checkbox_1D): sumpeak = ', sumpeak)\n print() \n \n# NOTE: Use this section of the input image is a subset of a larger image\n# Not currently needed for this analysis\n# # Determine the center of the brightest checkbox, in extracted\n# # image coordinates\n# xpeak = xpeak + xhw\n \n return xpeak, xhw",
"def DrawCheckBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawCheckBox(*args, **kwargs)",
"def paint(self, painter, option, index):\n self.drawCheck(painter, option, option.rect, Qt.Unchecked if int(index.data()) == 0 else Qt.Checked)",
"def GetCheckBoxSize(*args, **kwargs):\n return _gdi_.RendererNative_GetCheckBoxSize(*args, **kwargs)",
"def add_CheckBox(self, checkbox_rating):\n for item in checkbox_rating:\n checkbox = MyCheckbox(item)\n self.layout.add_widget(checkbox)\n self.list_checkboxes.append(checkbox)",
"def plot_all_gt_bboxes(self, axis):\n\n for corner_sub in self.corner_list:\n utils.draw_box(axis, corner_sub, axes=[0, 1, 2], color='blue')",
"def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)",
"def updateButtons(self):\n self.cboxes = [] # List of check boxes\n self.tboxes = [] # Corresponding list of text boxes\n for r in range(self.nclasses):\n c = 0\n # print('**', self.clusters[r])\n tbox = QLineEdit(self.clusters[r])\n tbox.setMinimumWidth(80)\n tbox.setMaximumHeight(150)\n tbox.setStyleSheet(\"border: none;\")\n tbox.setAlignment(Qt.AlignCenter)\n tbox.textChanged.connect(self.updateClusterNames)\n self.tboxes.append(tbox)\n self.flowLayout.addWidget(self.tboxes[-1], r, c)\n c += 1\n cbox = QCheckBox(\"\")\n cbox.clicked.connect(self.selectAll)\n self.cboxes.append(cbox)\n self.flowLayout.addWidget(self.cboxes[-1], r, c)\n c += 1\n # Find the segments under this class and show them\n for segix in range(len(self.segments)):\n if self.segments[segix][-1] == r:\n self.flowLayout.addWidget(self.picbuttons[segix], r, c)\n c += 1\n self.picbuttons[segix].show()\n self.flowLayout.adjustSize()\n self.flowLayout.update()\n self.setColourLevels()",
"def check_box(self, grid: object, name: str, xposition: int, yposition: int,\n synchronize: bool = False, xspan: int = 1, yspan: int = 1) -> QtWidgets.QCheckBox:\n label = QtWidgets.QLabel()\n label.setText(TR().tr(name) + ':')\n grid.addWidget(label, yposition, xposition, 1, 1)\n\n input = QtWidgets.QCheckBox()\n input.setObjectName(name)\n if synchronize:\n self.synchronize(input)\n grid.addWidget(input, yposition, xposition + 1, yspan, xspan)\n input.stateChanged.connect(self.data_changed)\n\n return input"
] | [
"0.6706593",
"0.62596804",
"0.5975192",
"0.57803637",
"0.5758032",
"0.5719117",
"0.5667263",
"0.5657341",
"0.5637688",
"0.5620014",
"0.5609665",
"0.5569312",
"0.5564542",
"0.55392426",
"0.5529903",
"0.55119216",
"0.5509527",
"0.54798144",
"0.5429685",
"0.5427229",
"0.541907",
"0.5411637",
"0.5366985",
"0.5352789",
"0.5331311",
"0.5276325",
"0.52651143",
"0.526081",
"0.5257372",
"0.5255166"
] | 0.71929073 | 0 |
Generate ``model``-specific synthesis script. | def generate_yosys_script(summary, renderer, ostream, model, model_sources, template = "synth.specific.tmpl.ys"):
    renderer.add_generic( ostream, template,
            model = model, model_sources = model_sources, yosys_script = summary.yosys["script"],
            iteritems = iteritems, itervalues = itervalues ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render",
"def build_model_multi(self):\n\t\n\t\tif len(self.template) < 1: raise Exception('except: needs multiple templates '+str(self.template))\n\t\tif len(self.target) != 1: raise Exception('except: needs only one target '+str(self.template))\n\t\n\t\tprint 'preparing modeller scripts'\n\t\t#---variables passed to modeller via settings-homology.py\n\t\tvars_to_modeller = {\n\t\t\t'pdblist':self.template,\n\t\t\t'target_seq':self.target[0][0],\n\t\t\t'n_models':self.settings['n_models'],\n\t\t\t}\n\t\n\t\t#---write a settings file for the modeller script\n\t\twith open(self.rootdir+'settings-homology.py','w') as fp:\n\t\t\tfp.write('#!/usr/bin/python\\n\\n')\n\t\t\tfor var in vars_to_modeller.keys():\n\t\t\t\tval = '\\''+str(vars_to_modeller[var])+'\\'' \\\n\t\t\t\t\tif type(vars_to_modeller[var]) == str else vars_to_modeller[var]\n\t\t\t\tfp.write(var+' = '+str(val)+'\\n')\n\t\t\t\n\t\t#---write an ali file with the target\n\t\tfasta_linelen = 50\n\t\twith open(self.rootdir+self.target[0][0]+'.ali','w') as fp:\n\t\t\tfp.write('>P1;'+self.target[0][0]+'\\n')\n\t\t\tfp.write('sequence:'+self.target[0][0]+':::::::0.00:0.00\\n')\n\t\t\tseq = self.target[0][1]\n\t\t\tchopped = [seq[j*fasta_linelen:(j+1)*fasta_linelen] for j in range(len(seq)/fasta_linelen+1)]\n\t\t\tchopped = [i for i in chopped if len(i) > 0]\n\t\t\tfor i,seg in enumerate(chopped): fp.write(seg+('\\n' if i < len(chopped)-1 else '*\\n'))\n\t\t\n\t\tprint 'running modeller'\n\t\tcmd = [gmxpaths['modeller'],'script-multi.py']\n\t\tcall(cmd,logfile='log-modeller-script-multi',cwd=self.rootdir)",
"def writeCode(doc):\n\n comp_template = \"model.addCompartment(vol=%s, comp_id='%s');\"\n species_template = \"model.addSpecies(species_id='%s', amt=%s, comp='%s');\"\n param_template = \"model.addParameter(param_id='%s', val=%s, units='%s');\"\n rxn_template = (\n \"model.addReaction(reactants=%s, products=%s, \"\n \"expression='%s', local_params=%s, rxn_id='%s');\"\n )\n event_template = (\n \"model.addEvent(trigger='%s', assignments=%s, persistent=%s, \"\n \"initial_value=%s, priority=%s, delay=%s, event_id='%s');\"\n )\n event_defaults = [True, False, \"0\", 0]\n assignrule_template = \"model.addAssignmentRule(var='%s', math='%s');\"\n raterule_template = \"model.addRateRule(var='%s', math='%s', rr_id='%s');\"\n initassign_template = \"model.addInitialAssignment(symbol='%s', math='%s')\"\n init_template = (\n \"import simplesbml\\nmodel = simplesbml.sbmlModel(time_units='%s', \"\n \"extent_units='%s', sub_units='%s', level=%s, version=%s);\"\n )\n init_defaults = [\"min\", \"Molar\", \"Molar\", 3, 1]\n command_list = []\n\n if doc.getLevel() == 1:\n warnings.warn(\"Warning: SimpleSBML does not support SBML Level 1.\")\n\n props = libsbml.ConversionProperties()\n props.addOption(\"flatten comp\", True)\n result = doc.convert(props)\n if result != libsbml.LIBSBML_OPERATION_SUCCESS:\n raise SystemExit(\"Conversion failed: (\" + str(result) + \")\")\n\n mod = doc.getModel()\n comps = mod.getListOfCompartments()\n species = mod.getListOfSpecies()\n params = mod.getListOfParameters()\n rxns = mod.getListOfReactions()\n events = mod.getListOfEvents()\n rules = mod.getListOfRules()\n print(\"rules\", rules)\n inits = []\n if doc.getLevel() == 3 or (doc.getLevel() == 2 and doc.getVersion() > 1):\n inits = mod.getListOfInitialAssignments()\n\n timeUnits = \"min\" # second\n substanceUnits = \"Molar\" # mole\n extentUnits = \"Molar\" # mole\n if doc.getLevel() == 3:\n timeUnits = mod.getTimeUnits()\n extentUnits = mod.getExtentUnits()\n substanceUnits = mod.getSubstanceUnits()\n level = mod.getLevel()\n version = mod.getVersion()\n init_list = [timeUnits, extentUnits, substanceUnits, level, version]\n for i in range(0, 5):\n if init_list[i] == init_defaults[i]:\n init_list[i] = \"del\"\n\n command_list.append(\n init_template\n % (init_list[0], init_list[1], init_list[2], init_list[3], init_list[4])\n )\n\n for comp in comps:\n if comp.getId() != \"c1\":\n if comp.getId()[0] == \"c\" and comp.getId()[1 : len(comp.getId())].isdigit():\n if comp.getSize() == 1e-15:\n command_list.append(comp_template % (\"del\", \"del\"))\n else:\n command_list.append(comp_template % (comp.getSize(), \"del\"))\n else:\n if comp.getSize() == 1e-15:\n command_list.append(comp_template % (\"del\", comp.getId()))\n else:\n command_list.append(comp_template % (comp.getSize(), comp.getId()))\n\n for s in species:\n conc = s.getInitialConcentration()\n amt = s.getInitialAmount()\n sid = s.getId()\n if s.getCompartment() == \"c1\":\n comp = \"del\"\n else:\n comp = s.getCompartment()\n bc = s.getBoundaryCondition()\n if bc:\n sid = \"$\" + sid\n if isnan(conc) or amt > conc:\n command_list.append(species_template % (sid, str(amt), comp))\n else:\n command_list.append(species_template % (\"[\" + sid + \"]\", str(conc), comp))\n\n for p in params:\n val = p.getValue()\n pid = p.getId()\n if p.getUnits() == \"per_second\":\n units = \"del\"\n else:\n units = p.getUnits()\n isDelay = pid.find(\"Delay\")\n if isDelay == -1:\n command_list.append(param_template % (pid, str(val), str(units)))\n\n for v in rxns:\n vid = 
v.getId()\n if vid[0] == \"v\" and vid[1 : len(vid)].isdigit():\n vid = \"del\"\n reactants = []\n for r in v.getListOfReactants():\n reactants.append(\n (str(r.getStoichiometry()) + \" \" + r.getSpecies()).replace(\"1.0 \", \"\")\n )\n products = []\n for p in v.getListOfProducts():\n products.append(\n (str(p.getStoichiometry()) + \" \" + p.getSpecies()).replace(\"1.0 \", \"\")\n )\n expr = libsbml.formulaToString(v.getKineticLaw().getMath())\n local_params = {}\n local_ids = []\n local_values = []\n for k in v.getKineticLaw().getListOfParameters():\n local_ids.append(k.getId())\n local_values.append(k.getValue())\n local_params = dict(zip(local_ids, local_values))\n if len(local_params) == 0:\n local_params = \"del\"\n command_list.append(\n rxn_template % (str(reactants), str(products), expr, str(local_params), vid)\n )\n\n for e in events:\n persistent = True\n initialValue = False\n priority = \"0\"\n eid = e.getId()\n if len(eid) == 0 or (eid[0] == \"e\" and eid[1 : len(eid)].isdigit()):\n eid = \"del\"\n if doc.getLevel() == 3:\n persistent = e.getTrigger().getPersistent()\n initialValue = e.getTrigger().getInitialValue()\n priority = e.getPriority()\n if isinstance(priority, libsbml.Priority):\n priority = libsbml.formulaToL3String(priority.getMath())\n else:\n priority = \"0\"\n tri = libsbml.formulaToL3String(e.getTrigger().getMath())\n did = e.getDelay()\n if isinstance(did, libsbml.Delay):\n delay = libsbml.formulaToL3String(did.getMath())\n else:\n delay = \"0\"\n assigns = e.getListOfEventAssignments()\n var = []\n values = []\n for assign in assigns:\n var.append(assign.getVariable())\n values.append(libsbml.formulaToL3String(assign.getMath()))\n assigns = dict(zip(var, values))\n\n event_list = [persistent, initialValue, priority, delay]\n for i in range(0, 4):\n if event_list[i] == event_defaults[i]:\n event_list[i] = \"del\"\n\n command_list.append(\n event_template\n % (\n tri,\n str(assigns),\n event_list[0],\n event_list[1],\n event_list[2],\n event_list[3],\n eid,\n )\n )\n\n for r in rules:\n rid = r.getId()\n print(\"rid\")\n # if rid[0] == 'Rate' and rid[1:len(rid)].isdigit():\n # rid = 'del'\n sym = r.getVariable()\n math = libsbml.formulaToL3String(r.getMath())\n if r.getTypeCode() == libsbml.SBML_ASSIGNMENT_RULE:\n command_list.append(assignrule_template % (sym, math))\n elif r.getTypeCode() == libsbml.SBML_RATE_RULE:\n command_list.append(raterule_template % (sym, math, rid))\n else:\n pass\n\n for i in inits:\n sym = i.getSymbol()\n math = libsbml.formulaToL3String(i.getMath())\n command_list.append(initassign_template % (sym, math))\n\n commands = \"\\n\".join(command_list)\n commands = sub(r\"\\w+='?del'?(?=[,)])\", \"\", commands)\n commands = sub(r\"\\((, )+\", \"(\", commands)\n commands = sub(r\"(, )+\\)\", \")\", commands)\n commands = sub(\"(, )+\", \", \", commands)\n return commands",
"def generateModelFilename(args, type):\n opt = []\n if args.letters:\n opt.append('l')\n if args.symbols:\n opt.append('s')\n if args.digits:\n opt.append('d')\n opt.sort()\n return \"models/model_{0}_{1}.yml\".format(type, ''.join(opt))",
"def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):\n with open(filename, \"w\") as file:\n self._write_model(file, specs, experiment = experiment)",
"def _synthesize_for_model(model: Model) -> Formula:\n assert is_model(model)\n assert len(model.keys()) > 0\n # Task 2.6\n vars = list(variables(model))\n formula =_synthesize_for_model_helper(model, vars, 0)\n return formula",
"def synthesize(model, mspec, spk):\n if use_cuda:\n model = model.cuda()\n\n model.eval()\n\n sequence = np.array(mspec)\n sequence = Variable(torch.from_numpy(sequence)).unsqueeze(0)\n spk = np.array(spk)\n spk = Variable(torch.from_numpy(spk)).unsqueeze(0)\n\n if use_cuda:\n sequence = sequence.cuda() \n spk = spk.cuda()\n\n with torch.no_grad():\n model.forward_getlatents(sequence)\n mel_outputs, linear_outputs, = model.forward_eval(sequence, spk)\n\n linear_output = linear_outputs[0].cpu().data.numpy()\n spectrogram = audio.denormalize(linear_output)\n waveform = audio.inv_spectrogram(linear_output.T)\n\n return waveform",
"def synthesize_for_model(model: Model) -> Formula:\r\n # the idea is -> first step put the var or ~var\r\n # than each time do - > add '(' at first\r\n # '(' + the_string '&' + the_new_string + ')'\r\n \"\"\"\r\n We solve this equation by using CNF.\r\n every var that is false we doing ~var, and connecting all the var by '&'\r\n and this will provide us with formula which is true just \r\n for the given model\r\n \"\"\"\r\n assert is_model(model)\r\n # Task 2.6\r\n first = True\r\n str_formula = \"\"\r\n for key, value in model.items():\r\n if first:\r\n first = False\r\n if not value:\r\n str_formula += '~'\r\n str_formula += key\r\n else:\r\n str_formula = \"(\" + str_formula + \"&\"\r\n if not value:\r\n str_formula += '~'\r\n str_formula += key\r\n str_formula += \")\"\r\n # creating a list, that list[0] contain the string, because that what\r\n # list_to_string function is required\r\n list_of_string = list()\r\n list_of_string.append(str_formula)\r\n return str_to_form(list_of_string)",
"def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)",
"def GenerateModel(modelData, outputFilePath, objectName = 'SBMLmodel'):\n #The library mathFuncs serves to both only allow functions supported\n #functions in SBML/user defined functions, but also the python equivalent\n \n np.set_printoptions(threshold=sys.maxsize)\n \n \n \n outputFile = open(outputFilePath, \"w\")\n\n parameters = modelData.parameters\n compartments = modelData.compartments\n species = modelData.species\n reactions = modelData.reactions\n functions = modelData.functions\n \n assignmentRules = modelData.assignmentRules\n rateRules = modelData.rateRules\n initialAssignments = modelData.initialAssignments\n \n mathFuncs = {'abs' : 'abs',\n 'max' : 'max',\n 'min' : 'min',\n 'pow' : 'pow',\n 'exp' : 'math.exp',\n 'floor' : 'np.floor',\n 'ceiling' : 'math.ceil',\n 'exp' : 'math.exp',\n 'ln' : 'math.log',\n 'log' : 'math.log10',\n 'factorial' : 'math.factorial',\n 'sqrt' : 'math.sqrt',\n \n 'eq' : 'operator.eq',\n 'neq' : 'operator.ne',\n 'gt' : 'operator.gt',\n 'lt' : 'operator.lt',\n 'geq' : 'operator.ge',\n 'leq' : 'operator.le',\n \n 'and' : 'operator.and_',\n 'or' : 'operator.or_',\n 'xor' : 'operator.xor_',\n 'not' : 'operator.not_',\n \n 'sin' : 'np.sin',\n 'cos' : 'np.cos',\n 'tan' : 'np.tan',\n 'sec' : '1/np.cos',\n 'csc' : '1/np.sin',\n 'cot' : '1/np.tan',\n 'sinh' : 'np.sinh',\n 'cosh' : 'np.cosh',\n 'tanh' : 'np.tanh',\n 'sech' : '1/np.cosh',\n 'csch' : '1/np.sinh',\n 'coth' : '1/np.tanh',\n 'arcsin' : 'np.arcsin',\n 'arccos' : 'np.arccos',\n 'arctan' : 'np.arctan',\n 'arcsinh' : 'np.arcsinh',\n 'arccosh' : 'np.arccosh',\n 'arctanh' : 'np.arctanh',\n \n 'true' : 'True',\n 'false' : 'False',\n 'notanumber' : 'np.nan',\n 'pi' : 'np.pi',\n 'infinity' : 'np.inf',\n 'exponentiale' : 'np.e',\n 'piecewise' : 'Piecewise'\n } \n #Add in user defined functions\n# for function in functions:\n# mathFuncs[function] = \"self.\" + function\n\t\t\n #Set up stoichCoeffMat, a matrix of stoichiometric coefficients for solving the reactions\n reactantCounter = 0\n reactantIndex = {}\n reactionCounter = 0\n reactionIndex = {}\n \n rateRuleVars = []\n rateParams = 0\n for specie in species:\n reactantIndex[specie] = reactantCounter\n reactantCounter += 1\n for key, rateRule in rateRules.items():\n if rateRule.variable in parameters or rateRule.variable in compartments:\n rateParams += 1\n reactantIndex[rateRule.variable] = reactantCounter\n reactantCounter += 1\n rateRuleVars.append(rateRule.variable)\n elif rateRule.variable in species:\n pass\n else:\n raise Exception(\"Rate Rule adjusting something other than specie amount, parameter value, or compartment size.\")\n\n \t\t\n stoichCoeffMat = np.zeros([len(species) + rateParams, max(len(reactions),1)])\n \n for rxnId in reactions:\n reactionIndex[rxnId] = reactionCounter\n reactionCounter += 1\n reaction = reactions[rxnId]\n for reactant in reaction.reactants:\n if reactant[1] not in reactantIndex:\n reactantIndex[reactant[1]] = reactantCounter\n reactantCounter += 1\n if not (species[reactant[1]].isBoundarySpecies == \"True\"):\n stoichCoeffMat[reactantIndex[reactant[1]], reactionIndex[rxnId]] += reactant[0]\n\n \t\n # for reaction in reactions:\n # for reactant in reactions[reaction][0]:\n # if reactant[1] not in reactantIndex:\n # reactantIndex[reactant[1]] = reactantCounter\n # reactantCounter += 1\n # if not species[reactant[1]][4]:\n # stoichCoeffMat[reactantIndex[reactant[1]], reaction-1] += reactant[0]\n #print(rateParams)\n #print(stoichCoeffMat)\n \n outputFile.write(\"from sbmltopyode.SBMLModelClasses import *\\n\")\n 
outputFile.write(\"from scipy.integrate import odeint\\n\")\n outputFile.write(\"import numpy as np\\n\")\n outputFile.write(\"import operator\\n\")\n outputFile.write(\"import math\\n\\n\")\n \n outputFile.write(\"class \" + objectName +\":\\n\\n\")\n \n outputFile.write(\"\\tdef __init__(self):\\n\\n\")\n outputFile.write(\"\\t\\tself.p = {} #Dictionary of model parameters\\n\")\n for paramId in parameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \\'\"+ paramId + \"\\', \" + str(parameters[paramId].isConstant) +\")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.c = {} #Dictionary of compartments\\n\")\n for compartmentId in compartments:\n outputFile.write(\"\\t\\tself.c[\\'\" + compartmentId + \"\\'] = Compartment(\" + str(compartments[compartmentId].size) + \", \" + str(compartments[compartmentId].dimensionality)+ \", \" + str(compartments[compartmentId].isConstant) + \")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.s = {} #Dictionary of chemical species\\n\")\n for speciesId in species:\n outputFile.write(\"\\t\\tspeciesMetadata = SBMLMetadata('\" + species[speciesId].name +\"')\\n\")\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\'] = Species(\" + str(species[speciesId].value) + \", '\" + species[speciesId].valueType + \"', self.c['\" + species[speciesId].compartment + \"'], \" + str(species[speciesId].hasOnlySubstanceUnits) + \", constant = \" + str(species[speciesId].isConstant) + \")\\n\")\n for key, rule in assignmentRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n for key, rule in rateRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n \n \n outputFile.write(\"\\n\\t\\tself.r = {} #Dictionary of reactiions\\n\")\n for reactionId in reactions:\n outputFile.write(\"\\t\\tself.r[\\'\" + reactionId + \"\\'] = \" + reactionId + \"(self, SBMLMetadata('\" + reactions[reactionId].name + \"'))\\n\")\n \n outputFile.write(\"\\t\\tself.time = 0\\n\\n\")\n \n outputFile.write(\"\\t\\tself.reactionMetadata = {\")\n commaFlag = 0\n for reactionId in reactions:\n if commaFlag == 0:\n commaFlag = 1\n outputFile.write(\"\\n\\t\\t\")\n else:\n outputFile.write(\",\\n\\t\\t\")\n outputFile.write(\"self.Reaction\" + reactionId + \": SBMLMetadata('\" + reactions[reactionId].name + \"')\")\n outputFile.write(\"\\n\\t\\t}\\n\")\n \n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n \n outputFile.write(\"\\n\\n\")\n outputFile.write(\"\\tdef AssignmentRules(self):\\n\\n\")\n #These functions are defined here due to reading variables in the parent function's namespace\n #These are not intended to be used elsewhere\n def ParseLHS(rawLHS):\n returnLHS = ''\n if rawLHS in parameters:\n returnLHS = \"self.p[\\'\" + rawLHS + \"\\'].value = \"\n elif rawLHS in species:\n if not species[rawLHS].hasOnlySubstanceUnits: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].concentration = '\n else: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].amount = '\n elif rawLHS in compartments:\n returnLHS = 'self.c[\\'' + rawLHS + '\\'].size = '\n else:\n raise(Exception(\"New case: rule LHS not in p: \" + rawLHS))\n\n return returnLHS\n\t\n def ParseRHS(rawRHS, extendedParams = [], objectText = \"self\"):\n #objectText is not \"self\" when parsing reaction math\n \n #The main purpose of this function is to turn math strings given by libSBML into\n #code 
formated to properly call members of the resulting class\n #For example k_1*C_A may turn to\n \n \n rawRHS = rawRHS.replace(\"^\", \"**\") #Replaces carrot notation for exponentiation with ** operator\n variables = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rawRHS): #look for variable names\n #ToDo: check for function calls\n variables.append([rawRHS[match.start():match.end()], match.span()])\n \n #rule[1] contains the right hand side\n returnRHS = ''\n oldSpan = None\n if variables != []:\n for variable in variables:\n if oldSpan == None and variable[1][0] != 0:\n returnRHS += rawRHS[0:variable[1][0]]\n elif oldSpan != None:\n returnRHS += rawRHS[oldSpan[1]:variable[1][0]]\n oldSpan = variable[1]\n if variable[0] in parameters:\n returnRHS += objectText + '.p[\\'' + variable[0] + '\\'].value'\n elif variable[0] in species:\n if not species[variable[0]].hasOnlySubstanceUnits == \"True\": \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].concentration'\n else: \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].amount'\n elif variable[0] in compartments:\n returnRHS += objectText + '.c[\\'' + variable[0] + '\\'].size'\n elif variable[0] in mathFuncs:\n returnRHS += mathFuncs[variable[0]]\n elif variable[0] in functions:\n returnRHS += objectText + '.' + variable[0]\n elif variable[0] in extendedParams:\n if objectText == \"self\":\n returnRHS += variable[0]\n else:\n returnRHS += \"self.p[\\'\" + variable[0] + \"\\'].value\"\n\n elif variable[0] == \"time\":\n returnRHS += objectText + '.time'\n elif variable[0] == \"pi\":\n returnRHS += \"np.pi\"\n else:\n raise(Exception('New case: unkown RHS variable: ' + variable[0]))\n returnRHS += rawRHS[variable[1][1]:len(rawRHS)]\n # print(rule[1][variable[1][1]])\n #print(rule[1][-1])\n else:\n returnRHS = rawRHS\n\t\t\n return returnRHS\n\n ruleDefinedVars = [rule.variable for rule in assignmentRules.values()]\n for key, assignment in initialAssignments.items():\n ruleDefinedVars.append(assignment.variable)\n \n for key, rule in assignmentRules.items():\n rule.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rule.math): #look for variable names\n rule.dependents.append(rule.math[match.start():match.end()])\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] not in ruleDefinedVars:\n rule.dependents.pop(originalLen- i-1)\n \n for key, assignment in initialAssignments.items():\n assignment.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', assignment.math): #look for variable names\n assignment.dependents.append(assignment.math[match.start():match.end()])\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i -1] not in ruleDefinedVars :\n assignment.dependents.pop(originalLen- i-1)\n \n# breakVar = False\n while True:\n continueVar = False\n breakVar = True\n varDefinedThisLoop = None\n for key, rule in assignmentRules.items():\n if rule.dependents == []:\n ruleLHS = ParseLHS(rule.variable)\n ruleRHS = ParseRHS(rule.math)\n outputFile.write(\"\\t\\t\" + ruleLHS + ruleRHS + '\\n\\n')\n varDefinedThisLoop = rule.variable\n rule.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n if not continueVar:\n for key, assignment in initialAssignments.items():\n if assignment.dependents == []:\n assignmentLHS = ParseLHS(assignment.variable)\n assignmentRHS = ParseRHS(assignment.math)\n outputFile.write(\"\\t\\tif 
self.time <= 0 :\\n\")\n if assignment.variable in parameters:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.p['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in species:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.s['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in compartment:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.c['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n \n varDefinedThisLoop = assignment.variable\n assignment.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n for rule in assignmentRules.values():\n if not rule.dependents == None:\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] == varDefinedThisLoop:\n rule.dependents.pop(originalLen - i -1)\n# print(rule.variable + ':' + str(rule.dependents))\n\n for assignment in initialAssignments.values():\n if not assignment.dependents == None:\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i - 1] == varDefinedThisLoop:\n assignment.dependents.pop(originalLen - i - 1)\n# print(assignment.variable + ':' + str(assignment.dependents))\n \n if continueVar:\n continue\n elif breakVar:\n break\n else:\n raise Exception('Algebraic Loop in AssignmentRules')\n \n outputFile.write(\"\\t\\treturn\\n\\n\")\n \n for functionId in functions:\n arguments = functions[functionId].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write(\"\\tdef \" + functionId + \"(self, \" + argumentString + \"):\\n\")\n outputFile.write(\"\\t\\treturn \" + functions[functionId].mathString.replace(\"^\", \"**\") + \"\\n\")\n \n for reactionId in reactions:\n outputFile.write(\"\\tdef Reaction\" + str(reactionId) + \"(self):\\n\\n\")\n\n rxnParameters = []\n for param in reactions[reactionId].rxnParameters:\n outputFile.write(\"\\t\\t\" + param[0] + \" = \" + str(param[1]) + \"\\n\")\n rxnParameters.append(param[0])\n\t\t\t\n rateLaw = ParseRHS(reactions[reactionId].rateLaw, rxnParameters)\n \n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n rateRuleLHSVars = []\n for key, rateRule in rateRules.items():\n rateRuleLHSVars.append(rateRule.variable)\n outputFile.write(\"\\tdef Rate\" + rateRule.variable + \"(self):\\n\\n\")\n rateLaw = ParseRHS(rateRule.math)\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n \n yArray = ''\n i = 0\n yArrayVars = [0 for x in range(len(species) + rateParams)]\n for variable, index in 
reactantIndex.items():\n yArrayVars[index] = variable\n \n for index in range(len(yArrayVars)):\n # print(yArrayVars[index])\n if index != 0:\n yArray += ', '\n \n if yArrayVars[index] in species:\n yArray += 'self.s[\\'' + yArrayVars[index] + '\\'].amount'\n continue\n \n if yArrayVars[index] in parameters:\n yArray += 'self.p[\\'' + yArrayVars[index] + '\\'].value'\n continue\n \n if yArrayVars[index] in compartments:\n yArray += 'self.c\\'' + yArrayVars[index] + '\\'].size'\n continue\n \n\n \n outputFile.write('\\tdef _SolveReactions(self, y, t):\\n\\n')\n outputFile.write('\\t\\tself.time = t\\n')\n outputFile.write('\\t\\t' + yArray + ' = y\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n rateArray = '[ '\n i = 0\n rateArrayVars = [0 for x in range(len(species) + rateParams)]\n \n for variable, index in reactantIndex.items():\n if variable in rateRuleLHSVars:\n rateArrayVars[index] = variable\n \n\n \n for variable in rateArrayVars:\n if i != 0:\n rateArray += ', '\n i += 1\n if variable == 0:\n rateArray += '0'\n else:\n rateArray += 'self.Rate' + variable + '()'\n \n \n \n \n rateArray += ']'\n outputFile.write('\\t\\trateRuleVector = np.array(' + str(rateArray) + ', dtype = np.float64)\\n\\n') \n \n outputFile.write('\\t\\tstoichiometricMatrix = np.array(' + re.sub('\\n,', ',\\n\\t\\t\\t\\t\\t', re.sub('[^[] +', ',' ,str(stoichCoeffMat))) + ', dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\treactionVelocities = np.array([')\n reactionElements = ''\n if reactions:\n for reactionId in reactions:\n if reactionElements == '':\n reactionElements += ('self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements += (', self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements = '0'\n outputFile.write(reactionElements + '], dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\trateOfSpeciesChange = stoichiometricMatrix @ reactionVelocities + rateRuleVector\\n\\n')\n outputFile.write('\\t\\treturn rateOfSpeciesChange\\n\\n')\n \n outputFile.write('\\tdef RunSimulation(self, deltaT, absoluteTolerance = 1e-12, relativeTolerance = 1e-6):\\n\\n')\n \n outputFile.write('\\t\\tfinalTime = self.time + deltaT\\n')\n outputFile.write('\\t\\ty0 = np.array([' + yArray + '], dtype = np.float64)\\n')\n outputFile.write('\\t\\t' + yArray + ' = odeint(self._SolveReactions, y0, [self.time, finalTime], atol = absoluteTolerance, rtol = relativeTolerance, mxstep=5000000)[-1]\\n')\n outputFile.write('\\t\\tself.time = finalTime\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n outputFile.write('\\n')\n \n for key in reactions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.p = {}\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n for param in reactions[key].rxnParameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + param[0] + \"\\'] = Parameter(\" + str(param[1]) + \", '\" + param[0] + \"')\\n\")\n #\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \"+ paramId + \", \" + str(parameters[paramId].isConstant) +\")\\n\"\n \n outputFile.write('\\n\\tdef __call__(self):\\n')\n# print(key)\n# print(reactions[key].rxnParameters)\n 
rxnParamNames = [param[0] for param in reactions[key].rxnParameters]\n rateLaw = ParseRHS(reactions[key].rateLaw, rxnParamNames, \"self.parent\")\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n \n for key in functions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n\n arguments = functions[key].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write('\\tdef __call__(self, ' + argumentString + '):\\n')\n outputFile.write(\"\\t\\treturn \" + functions[key].mathString.replace(\"^\", \"**\") + \"\\n\\n\")\n\n outputFile.close()",
"def _spawn_model(self, model_xml: str):\n self.spawn_publisher.publish(f'<sdf version=\"1.7\">{model_xml}</sdf>')",
"def WriteSourceFileForSrnModel(filename, model):\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n source_file = open(srn_model_name + \".cpp\", 'w')\n\n #Include header files\n included_files = GetIncludedFilesForSourceString(filename, model)\n source_file.write(included_files)\n\n #Initialise class\n class_def = GetClassDefinition(filename, model)\n source_file.write(class_def)\n\n #Constructor for system\n constructor = GetClassConstructor(filename)\n source_file.write(constructor)\n\n #Functiond efinitions\n funct_defn_str = GetFunctionDefinitionsForSource(filename, model)\n source_file.write(funct_defn_str)\n\n #Initialise parameters\n init_fn = GetInitForSource(filename, model)\n source_file.write(init_fn)\n\n #Get the derivative function\n derivs_fn = GetEvaluateYDerivativesVoidString(filename, model)\n source_file.write(derivs_fn)\n\n #Initialise function\n initialise_fn = GetInitialiseString(filename, model)\n source_file.write(initialise_fn)\n\n #Define SRN Model\n srn_model_defn = GetModelDefinitionString(filename, model, False)\n source_file.write(srn_model_defn)\n\n source_file.close()\n\n print(srn_model_name + \".cpp written!\\n\")",
"def template_model():\n model_type = 'continuous' # either 'discrete' or 'continuous'\n model = do_mpc.model.Model(model_type)\n\n # Model variables:\n var1 = model.set_variable(var_type='_x', var_name='var1')\n var2 = model.set_variable(var_type='_x', var_name='var2')\n\n state = vertcat(var1,var2)\n state_dot = model.set_variable(var_type='_x', var_name='state_dot', shape=(2.1))\n\n input1 = model.set_variable(var_type='_u', var_name='input1')\n\n\n # Parameters:\n # define Parameters\n\n model.set_rhs('var1',state_dot[0])\n model.set_rhs('var2',state_dot[1])\n\n state_dot_rhs = vertcat(\n # rhs1,\n # rhs2)\n model.set_rhs('state_dot',state_dot_rhs)\n\n model.setup()\n\n return model",
"def main():\n if args.file and not args.nomodel:\n text = read_file(args.file)\n trained_model = train_char_model(text, args.prev)\n save_model(trained_model, args.file)\n sys.exit()\n if args.model:\n trained_model = load_model(args.model)\n if args.nomodel and args.file:\n trained_model = train_char_model(read_file(args.file), args.prev)\n # generate some random text\n history = check_history(trained_model, args.prev)\n gentext = generate_text(trained_model, history, args.gen)\n print(gentext)",
"def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model",
"def _modified_noise_model_program_header(noise_model: NoiseModel) -> \"Program\":\n from pyquil.quil import Program\n\n p = Program()\n defgates: Set[str] = set()\n for k in noise_model.gates:\n\n # obtain ideal gate matrix and new, noisy name by looking it up in the NOISY_GATES dict\n try:\n ideal_gate, new_name = get_modified_noisy_gate(k.gate, tuple(k.params))\n\n # if ideal version of gate has not yet been DEFGATE'd, do this\n if new_name not in defgates:\n p.defgate(new_name, ideal_gate)\n defgates.add(new_name)\n except NoisyGateUndefined:\n print(\n \"WARNING: Could not find ideal gate definition for gate {}\".format(k.gate),\n file=sys.stderr,\n )\n new_name = k.gate\n\n # define noisy version of gate on specific targets\n p.define_noisy_gate(new_name, k.targets, k.kraus_ops)\n\n # define noisy readouts\n for q, ap in noise_model.assignment_probs.items():\n p.define_noisy_readout(q, p00=ap[0, 0], p11=ap[1, 1])\n return p",
"def script_generator(self):\n analyze_tool = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py\"\n ex_options = self.global_setting.get('analyze_options', str())\n py = self.global_setting.get('python', sys.executable)\n if os.access(py, os.X_OK):\n content = \"set -e \\n\" \n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s plot_curve *.log.json \"%(py, analyze_tool)\n content += \"--keys loss loss_cls loss_pts_init \"\n content += \"loss_pts_refine \"\n content += \"--out losses.pdf %s &> analyze.log \\n\"%(ex_options)\n\n content += \"touch analyze.done \\n\"\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)",
"def get_model_filename(config):\n base = os.path.splitext(config['corpus'])[0]\n return '%s--%dT.model' % (base, config['T'])",
"def WriteSourceFileForCcmModel(filename, model):\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n source_file = open(ccm_model_name + \".cpp\", 'w')\n\n #Include header files\n included_files = GetIncludedFilesForSourceString(filename, model)\n source_file.write(included_files)\n\n #Initialise class\n class_def = GetClassDefinition(filename, model)\n source_file.write(class_def)\n\n #Constructor for system\n constructor = GetClassConstructor(filename)\n source_file.write(constructor)\n\n #Function definitions\n funct_defn_str = GetFunctionDefinitionsForSource(filename, model)\n source_file.write(funct_defn_str)\n\n #Initialise parameters\n init_fn = GetInitForSource(filename, model)\n source_file.write(init_fn)\n\n #Get the derivative function\n derivs_fn = GetEvaluateYDerivativesVoidString(filename, model)\n source_file.write(derivs_fn)\n\n #Get the stopping event function\n stopping_event_fn = GetStoppingEventBooleanString(filename, model)\n source_file.write(stopping_event_fn)\n\n #Get the void to check and update SBML events\n events_fn = GetCheckAndUpdateEventsVoidString(filename, model)\n source_file.write(events_fn)\n\n #Get the void to check and update SBML events\n events_satisfied_fn = GetAreAllEventsSatisfiedBooleanString(filename)\n source_file.write(events_satisfied_fn)\n\n #Initialise function\n initialise_fn = GetInitialiseString(filename, model)\n source_file.write(initialise_fn)\n\n #Define SRN Model\n srn_model_defn = GetModelDefinitionString(filename, model, False)\n source_file.write(srn_model_defn)\n\n source_file.close()\n\n print(ccm_model_name + \".cpp written!\\n\")",
"def synthesize_for_model(model):\r\n # Task 2.6\r\n keys = list(model.keys())\r\n propositional = keys[0]\r\n if not model[propositional]:\r\n propositional = \"~\" + propositional\r\n iterkeys = iter(keys)\r\n next(iterkeys)\r\n for key in iterkeys:\r\n if model[key]:\r\n propositional = \"(\" + propositional + \"&\" + key + \")\"\r\n else:\r\n propositional = \"(\" + propositional + \"&~\" + key + \")\"\r\n return Formula.from_infix(propositional)",
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def main():\n model = Calculator()",
"def generate_DSP_script_for_Xwr16xx(params):\n return str(params)",
"def GetModelDefinitionString(filename, model, forhpp):\n #Get the model by reading from the filename\n\n #Size is defined by the number of ODEs\n odes_dict = translator.GetOdesDictionary(model)\n rules_dict = translator.GetRulesDictionary(model)\n num_species = len(odes_dict) + len(rules_dict)\n\n model_defn_string = \"\"\n\n ode_name = GetOdeSystemName(filename)\n model_name = GetModelName(filename, model)\n\n #Define some common names depending on whether or not the model is a SRN or a CCM\n is_srn_model = translator.IsSrnModel(model)\n if (is_srn_model):\n wrapper = \"SbmlSrnWrapperModel\"\n\n model_defn_string += translator.GetBlockCommentDefinition(0, \"Define SRN model using Wrappers.\", True)\n else:\n wrapper = \"SbmlCcmWrapperModel\"\n\n model_defn_string += translator.GetBlockCommentDefinition(0, \"Define cell cycle model using Wrappers.\", True)\n\n model_defn_string += (IncludeString(\"\\\"\" + wrapper + \".hpp\\\"\") + \n IncludeString(\"\\\"\" + wrapper + \".cpp\\\"\") + \"\\n\" + \n \"typedef \" + wrapper + \"<\" + ode_name + \",\" + str(num_species) + \"> \" + model_name + \";\\n\" + \n \"\\n\")\n\n model_defn_string += translator.GetCommentDefinition(0, \"Declare identifiers for the serializer\", True)\n if ( forhpp ):\n model_defn_string += IncludeString(\"\\\"SerializationExportWrapper.hpp\\\"\")\n else:\n model_defn_string += IncludeString(\"\\\"SerializationExportWrapperForCpp.hpp\\\"\")\n\n model_defn_string += (\"CHASTE_CLASS_EXPORT(\" + ode_name + \")\\n\" + \n \"EXPORT_TEMPLATE_CLASS2(\" + wrapper + \", \" + ode_name + \", \" + str(num_species) + \")\\n\" + \n \"\\n\" + \n IncludeString(\"\\\"CellCycleModelOdeSolverExportWrapper.hpp\\\"\") + \n \"EXPORT_CELL_CYCLE_MODEL_ODE_SOLVER(\" + model_name + \")\\n\\n\" )\n\n return model_defn_string",
"def save_plot_model_script(folderOUT):\n with open(folderOUT+'generate_model_plot.py', 'w') as f_out:\n f_out.write('#!/usr/bin/env python' + '\\n')\n f_out.write('try:' + '\\n')\n f_out.write('\\timport keras as ks' + '\\n')\n f_out.write('except ImportError:' + '\\n')\n f_out.write('\\tprint \"Keras not available. Activate tensorflow_cpu environment\"' + '\\n')\n f_out.write('\\traise SystemExit(\"=========== Error -- Exiting the script ===========\")' + '\\n')\n f_out.write('model = ks.models.load_model(\"%smodels/model-000.hdf5\")'%(folderOUT) + '\\n')\n f_out.write('try:' + '\\n')\n f_out.write('\\tks.utils.plot_model(model, to_file=\"%s/plot_model.png\", show_shapes=True, show_layer_names=True)'%(folderOUT) + '\\n')\n f_out.write('except OSError:' + '\\n')\n f_out.write('\\tprint \"could not produce plot_model.png ---- try on CPU\"' + '\\n')\n f_out.write('\\traise SystemExit(\"=========== Error -- Exiting the script ===========\")' + '\\n')\n f_out.write('print \"=========== Generating Plot Finished ===========\"' + '\\n')\n f_out.write('\\n')",
"def main():\n logging.basicConfig(level=logging.WARN)\n\n text = extract()\n text, char_indices, indices_char, x, y = transform(text)\n model(text, char_indices, indices_char, x, y)\n\n pass",
"def _synthesize_for_all_except_model(model: Model) -> Formula:\n assert is_model(model)\n assert len(model.keys()) > 0\n # Optional Task 2.8\n vars = list(variables(model))\n formula =_synthesize_for_all_except_model_helper(model, vars, 0)\n return formula",
"def synthesize(self, model, mu = 1.0):\n ndim = len(model.shape)\n dtype = self._get_dtype()\n \n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::synthesize: ERROR, the input model must have 1 or 3 dimensions\")\n return None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::synthesize: ERROR, input model has npar={0}, should be 9\".format(model1.shape[2]))\n return None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n \n \n return self.Me.synthesize(model, mu=mu)",
"def synthesise(self, library, entity, fpga_part=None):\n super(Quartus, self).synthesise(library, entity, fpga_part)\n # make a temporary working directory for the synth tool\n import tempfile\n startTime = datetime.datetime.now()\n with tempfile.TemporaryDirectory(\n dir=self.project.get_synthesis_directory()\n ) as workingDirectory:\n log.info(\n 'Created temporary synthesis directory: ' + workingDirectory\n )\n synthName = entity + '_synth_' + startTime.strftime(\n '%d%m%y_%H%M%S'\n )\n archiveName = synthName + '.tar'\n synthesisDirectory = os.path.join(workingDirectory, synthName)\n os.makedirs(synthesisDirectory)\n projectFilePath = os.path.join(synthesisDirectory, entity + '.tcl')\n if fpga_part is None:\n fpga_part = self.project.get_fpga_part()\n self.makeProject(\n projectFilePath,\n self.project.get_synthesis_fileset(),\n self.project.get_constraints(),\n fpga_part,\n self.project.get_generics(),\n synthesisDirectory,\n entity,\n )\n try:\n # Run the flow\n self.exec_quartus_sh(\n os.path.basename(projectFilePath),\n synthesisDirectory\n )\n except:\n # Archive the outputs\n log.error(\n 'Synthesis error, storing output in error directory...'\n )\n self.storeOutputs(workingDirectory, 'ERROR_' + archiveName)\n raise\n log.info(\n 'Build successful, checking reports for unacceptable ' +\n 'messages...'\n )\n # Check the report\n reporter_fn = self.project.get_reporter()\n try:\n if reporter_fn is not None:\n reporter_fn(synthesisDirectory)\n except:\n log.error(\n 'The post-synthesis reporter script caused an error:\\n' +\n traceback.format_exc()\n )\n # Archive the outputs\n log.info('Synthesis completed, saving output to archive...')\n self.storeOutputs(workingDirectory, archiveName)\n log.info('...done')",
"def __init__(self, model, idx=0, seed=None):\n\n self.__logger.info(\"Synthesizer init\")\n self.__logger.debug(\"DEBUG Message\")\n\n self.fake = Faker(seed) # First initialization of Faker\n self.__reccntr = idx # ?? Unknown variable\n self.add_providers() # Add providers to the faker\n self.schema = []\n self.is_dependent = []\n for field in model.info.schema.info.fields:\n self.schema.append(field.name)\n if field.info.aux.dependent == \"\":\n self.is_dependent.append(False)\n else:\n self.is_dependent.append(True)\n\n # Cache the generator functions once\n self.generator_fcns = {}\n\n self.set_generators_from_proto(model)\n\n # Following extension for generating duplicate records\n self.__dupcntr = 0\n self.__maxdup = 0\n self.__dupdist = [] # List of duplicate counts\n\n self._original = []\n self.duplicate = False\n self._expect_duplicate = False\n self.nduplicate_weights = None\n self.wrg = None\n self.mod = None\n\n # Generator counters/stats\n self.stats = {\"Total\": 0, \"Original\": 0, \"Duplicate\": 0}\n # self.h_dupdist = Histogram1D(range(10))\n\n if model.info.aux.HasField(\"duplicate\"):\n self.duplicate = True\n self.duplicate_cfg = dict()\n self.duplicate_cfg[\"Prob_duplicate\"] = model.info.aux.duplicate.probability\n self.duplicate_cfg[\"Dist_duplicate\"] = model.info.aux.duplicate.distribution\n self.duplicate_cfg[\"Max_duplicate\"] = model.info.aux.duplicate.maximum\n\n self.nduplicate_weights = self.generate_duplicate_pdf()\n if model.info.aux.HasField(\"record_modifier\"):\n self.mod = Modifier(\n self.fake,\n self.generator_fcns,\n self.schema,\n model.info.aux.record_modifier,\n )\n\n self.__logger.info(\"\")\n self.__logger.info(\"Synthesizer configured\")\n self.__logger.info(\"Model: %s\" % model)\n self.__logger.info(\"Schema:\")\n self.__logger.info(pformat(self.schema))\n self.__logger.info(\"Dataset record index: %d\" % idx)\n\n if seed:\n self.__logger.info(\"Seed set: %d\" % seed)\n\n self.__logger.info(\"Generate duplicate records:\")\n self.__logger.info(pformat(self.duplicate))\n\n if self.duplicate:\n self.__logger.info(\"Duplicate record probabilities\")\n self.__logger.info(pformat(self.duplicate_cfg))\n self.__logger.info(\"Duplicate PDF\")\n self.__logger.info(pformat(self.nduplicate_weights))\n self.__logger.info(\"Record modifier configuration\")\n self.__logger.info(model.info.aux.record_modifier)"
] | [
"0.71790755",
"0.61078423",
"0.6011807",
"0.5968458",
"0.5965008",
"0.5928381",
"0.59106827",
"0.5892473",
"0.58815545",
"0.5837325",
"0.5752082",
"0.57328606",
"0.5700278",
"0.5665125",
"0.5638353",
"0.5630792",
"0.5620719",
"0.5588311",
"0.55843246",
"0.556606",
"0.55422443",
"0.55131596",
"0.5507839",
"0.54831964",
"0.54804856",
"0.5443248",
"0.5439383",
"0.5436907",
"0.5429792",
"0.54119074"
] | 0.63034165 | 1 |
Adapt a CWL job object to the Galaxy API. CWL-derived tools in Galaxy can consume a job description sort of like CWL job objects via the API, but paths need to be replaced with datasets, and records and arrays with collection references. This function will stage files and modify the job description to adapt to these changes for Galaxy. | def galactic_job_json(job, test_data_directory, upload_func, collection_create_func):
    datasets = []
    dataset_collections = []
    def upload_file(file_path):
        if not os.path.isabs(file_path):
            file_path = os.path.join(test_data_directory, file_path)
        _ensure_file_exists(file_path)
        upload_response = upload_func(FileUploadTarget(file_path))
        dataset = upload_response["outputs"][0]
        datasets.append((dataset, file_path))
        dataset_id = dataset["id"]
        return {"src": "hda", "id": dataset_id}
    def upload_object(the_object):
        upload_response = upload_func(ObjectUploadTarget(the_object))
        dataset = upload_response["outputs"][0]
        datasets.append((dataset, the_object))
        dataset_id = dataset["id"]
        return {"src": "hda", "id": dataset_id}
    def replacement_item(value, force_to_file=False):
        is_dict = isinstance(value, dict)
        is_file = is_dict and value.get("class", None) == "File"
        if force_to_file:
            if is_file:
                return replacement_file(value)
            else:
                return upload_object(value)
        if isinstance(value, list):
            return replacement_list(value)
        elif not isinstance(value, dict):
            return upload_object(value)
        if is_file:
            return replacement_file(value)
        else:
            return replacement_record(value)
    def replacement_file(value):
        file_path = value.get("location", None) or value.get("path", None)
        if file_path is None:
            return value
        return upload_file(file_path)
    def replacement_list(value):
        collection_element_identifiers = []
        for i, item in enumerate(value):
            dataset = replacement_item(item, force_to_file=True)
            collection_element = dataset.copy()
            collection_element["name"] = str(i)
            collection_element_identifiers.append(collection_element)
        collection = collection_create_func(collection_element_identifiers, "list")
        dataset_collections.append(collection)
        hdca_id = collection["id"]
        return {"src": "hdca", "id": hdca_id}
    def replacement_record(value):
        collection_element_identifiers = []
        for record_key, record_value in value.items():
            if record_value.get("class") != "File":
                dataset = replacement_item(record_value, force_to_file=True)
                collection_element = dataset.copy()
            else:
                dataset = upload_file(record_value["location"])
                collection_element = dataset.copy()
            collection_element["name"] = record_key
            collection_element_identifiers.append(collection_element)
        collection = collection_create_func(collection_element_identifiers, "record")
        dataset_collections.append(collection)
        hdca_id = collection["id"]
        return {"src": "hdca", "id": hdca_id}
    replace_keys = {}
    for key, value in iteritems(job):
        replace_keys[key] = replacement_item(value)
    job.update(replace_keys)
    return job, datasets | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def galactic_job_json(\n job, test_data_directory, upload_func, collection_create_func, tool_or_workflow=\"workflow\"\n):\n\n datasets = []\n dataset_collections = []\n\n def upload_file(file_path, secondary_files, **kwargs):\n file_path = abs_path_or_uri(file_path, test_data_directory)\n target = FileUploadTarget(file_path, secondary_files, **kwargs)\n upload_response = upload_func(target)\n dataset = upload_response[\"outputs\"][0]\n datasets.append((dataset, target))\n dataset_id = dataset[\"id\"]\n return {\"src\": \"hda\", \"id\": dataset_id}\n\n def upload_tar(file_path):\n file_path = abs_path_or_uri(file_path, test_data_directory)\n target = DirectoryUploadTarget(file_path)\n upload_response = upload_func(target)\n dataset = upload_response[\"outputs\"][0]\n datasets.append((dataset, target))\n dataset_id = dataset[\"id\"]\n return {\"src\": \"hda\", \"id\": dataset_id}\n\n def upload_object(the_object):\n target = ObjectUploadTarget(the_object)\n upload_response = upload_func(target)\n dataset = upload_response[\"outputs\"][0]\n datasets.append((dataset, target))\n dataset_id = dataset[\"id\"]\n return {\"src\": \"hda\", \"id\": dataset_id}\n\n def replacement_item(value, force_to_file=False):\n is_dict = isinstance(value, dict)\n item_class = None if not is_dict else value.get(\"class\", None)\n is_file = item_class == \"File\"\n is_directory = item_class == \"Directory\"\n is_collection = item_class == \"Collection\" # Galaxy extension.\n\n if force_to_file:\n if is_file:\n return replacement_file(value)\n else:\n return upload_object(value)\n\n if isinstance(value, list):\n return replacement_list(value)\n elif not isinstance(value, dict):\n if tool_or_workflow == \"workflow\":\n # All inputs represented as dataset or collection parameters\n return upload_object(value)\n else:\n return value\n\n if is_file:\n return replacement_file(value)\n elif is_directory:\n return replacement_directory(value)\n elif is_collection:\n return replacement_collection(value)\n else:\n return replacement_record(value)\n\n def replacement_file(value):\n file_path = value.get(\"location\", None) or value.get(\"path\", None)\n if file_path is None:\n return value\n\n filetype = value.get('filetype', None)\n secondary_files = value.get(\"secondaryFiles\", [])\n secondary_files_tar_path = None\n if secondary_files:\n tmp = tempfile.NamedTemporaryFile(delete=False)\n tf = tarfile.open(fileobj=tmp, mode='w:')\n order = []\n index_contents = {\n \"order\": order\n }\n for secondary_file in secondary_files:\n secondary_file_path = secondary_file.get(\"location\", None) or secondary_file.get(\"path\", None)\n assert secondary_file_path, \"Invalid secondaryFile entry found [%s]\" % secondary_file\n full_secondary_file_path = os.path.join(test_data_directory, secondary_file_path)\n basename = secondary_file.get(\"basename\") or os.path.basename(secondary_file_path)\n order.append(basename)\n tf.add(full_secondary_file_path, os.path.join(SECONDARY_FILES_EXTRA_PREFIX, basename))\n tmp_index = tempfile.NamedTemporaryFile(delete=False)\n json.dump(index_contents, tmp_index)\n tmp_index.close()\n tf.add(tmp_index.name, SECONDARY_FILES_INDEX_PATH)\n tf.close()\n secondary_files_tar_path = tmp.name\n\n return upload_file(file_path, secondary_files_tar_path, filetype=filetype)\n\n def replacement_directory(value):\n file_path = value.get(\"location\", None) or value.get(\"path\", None)\n if file_path is None:\n return value\n\n if not os.path.isabs(file_path):\n file_path = os.path.join(test_data_directory, 
file_path)\n\n tmp = tempfile.NamedTemporaryFile(delete=False)\n tf = tarfile.open(fileobj=tmp, mode='w:')\n tf.add(file_path, '.')\n tf.close()\n\n return upload_tar(tmp.name)\n\n def replacement_list(value):\n collection_element_identifiers = []\n for i, item in enumerate(value):\n dataset = replacement_item(item, force_to_file=True)\n collection_element = dataset.copy()\n collection_element[\"name\"] = str(i)\n collection_element_identifiers.append(collection_element)\n\n # TODO: handle nested lists/arrays\n collection = collection_create_func(collection_element_identifiers, \"list\")\n dataset_collections.append(collection)\n hdca_id = collection[\"id\"]\n return {\"src\": \"hdca\", \"id\": hdca_id}\n\n def replacement_collection(value):\n collection_element_identifiers = []\n assert \"collection_type\" in value\n assert \"elements\" in value\n\n collection_type = value[\"collection_type\"]\n elements = value[\"elements\"]\n\n for element in elements:\n dataset = replacement_item(element, force_to_file=True)\n collection_element = dataset.copy()\n collection_element[\"name\"] = element[\"identifier\"]\n collection_element_identifiers.append(collection_element)\n\n # TODO: handle nested lists/arrays\n collection = collection_create_func(collection_element_identifiers, collection_type)\n dataset_collections.append(collection)\n hdca_id = collection[\"id\"]\n return {\"src\": \"hdca\", \"id\": hdca_id}\n\n def replacement_record(value):\n collection_element_identifiers = []\n for record_key, record_value in value.items():\n if record_value.get(\"class\") != \"File\":\n dataset = replacement_item(record_value, force_to_file=True)\n collection_element = dataset.copy()\n else:\n dataset = upload_file(record_value[\"location\"])\n collection_element = dataset.copy()\n\n collection_element[\"name\"] = record_key\n collection_element_identifiers.append(collection_element)\n\n collection = collection_create_func(collection_element_identifiers, \"record\")\n dataset_collections.append(collection)\n hdca_id = collection[\"id\"]\n return {\"src\": \"hdca\", \"id\": hdca_id}\n\n replace_keys = {}\n for key, value in iteritems(job):\n replace_keys[key] = replacement_item(value)\n\n job.update(replace_keys)\n return job, datasets",
"def convert(job, optimize=True, tolerance=conf['tolerance'], matrix=None):\n type_ = get_type(job)\n if type_ == 'dba':\n if type(job) is bytes: \n job = job.decode('utf-8')\n if type(job) is str:\n job = json.loads(job)\n if optimize:\n if 'defs' in job:\n for def_ in job['defs']:\n if def_['kind'] == 'path':\n pathoptimizer.optimize(def_['data'], tolerance)\n if def_['kind'] == 'fill':\n fill_mode = conf['fill_mode']\n if fill_mode not in ['Forward', 'Bidirectional', 'NearestNeighbor']:\n fill_mode = 'Bidirectional'\n print(\"WARN: fill_mode not recognized. Please check your config file.\")\n if conf['fill_mode'] == 'Forward':\n pass\n elif conf['fill_mode'] == 'Reverse':\n pathoptimizer.reverse_path(def_['data'])\n elif conf['fill_mode'] == 'Bidirectional':\n pathoptimizer.fill_optimize(def_['data'], tolerance)\n elif conf['fill_mode'] == 'NearestNeighbor':\n pathoptimizer.optimize(def_['data'], tolerance) \n if not 'head' in job:\n job['head'] = {}\n job['head']['optimized'] = tolerance\n elif type_ == 'svg':\n job = read_svg(job, conf['workspace'],\n tolerance, optimize=optimize)\n elif type_ == 'dxf':\n job = read_dxf(job, tolerance, optimize=optimize)\n elif type_ == 'gcode':\n job = read_gcode(job, tolerance, optimize=optimize)\n else:\n print(\"ERROR: file type not recognized\")\n raise TypeError\n if matrix:\n apply_alignment_matrix(job, matrix)\n return job",
"def job_to_cwl(job, dag, outputs, inputs):\n\n if job.dynamic_output:\n raise WorkflowError(\"Dynamic output is not supported by CWL conversion.\")\n for f in job.output:\n if os.path.isabs(f):\n raise WorkflowError(\n \"All output files have to be relative to the working directory.\"\n )\n\n get_output_id = lambda job, i: f\"#main/job-{job.jobid}/{i}\"\n\n dep_ids = {\n o: get_output_id(dep, i)\n for dep, files in dag.dependencies[job].items()\n for i, o in enumerate(dep.output)\n if o in files\n }\n files = [f for f in job.input if f not in dep_ids]\n if job.conda_env_file:\n files.add(os.path.relpath(job.conda_env_file))\n\n out = [get_output_id(job, i) for i, _ in enumerate(job.output)]\n\n def workdir_entry(i, f):\n location = f\"??inputs.input_files[{i}].location??\"\n if f.is_directory:\n entry = {\n \"class\": \"Directory\",\n \"basename\": os.path.basename(f),\n \"location\": location,\n }\n else:\n entry = {\n \"class\": \"File\",\n \"basename\": os.path.basename(f),\n \"location\": location,\n }\n return \"$({})\".format(\n json.dumps(outer_entry(f, entry)).replace('\"??', \"\").replace('??\"', \"\")\n ).replace('\"', \"'\")\n\n def outer_entry(f, entry):\n parent = os.path.dirname(f)\n if parent:\n return outer_entry(\n parent,\n {\n \"class\": \"Directory\",\n \"basename\": os.path.basename(parent),\n \"listing\": [entry],\n },\n )\n else:\n return entry\n\n if job in dag.targetjobs:\n # TODO this maps output files into the cwd after the workflow is complete.\n # We need to find a way to define subdirectories though. Otherwise,\n # there can be name clashes, and it will also become very crowded.\n outputs.append(\n {\n \"type\": {\"type\": \"array\", \"items\": \"File\"},\n \"outputSource\": f\"#main/job-{job.jobid}/output_files\",\n \"id\": f\"#main/output/job-{job.jobid}\",\n }\n )\n\n cwl = {\n \"run\": \"#snakemake-job\",\n \"requirements\": {\n \"InitialWorkDirRequirement\": {\n \"listing\": [\n {\"writable\": True, \"entry\": workdir_entry(i, f)}\n for i, f in enumerate(\n chain(\n files,\n (f for dep in dag.dependencies[job] for f in dep.output),\n )\n )\n ]\n }\n },\n \"in\": {\n \"cores\": {\"default\": job.threads},\n \"target_files\": {\"default\": job.output._plainstrings()},\n \"rules\": {\"default\": [job.rule.name]},\n },\n \"out\": [\"output_files\"],\n \"id\": f\"#main/job-{job.jobid}\",\n }\n if files:\n inputs.append(\n {\n \"type\": {\"type\": \"array\", \"items\": \"File\"},\n \"default\": [{\"class\": \"File\", \"location\": f} for f in files],\n \"id\": f\"#main/input/job-{job.jobid}\",\n }\n )\n\n input_files = []\n if files:\n input_files.append(f\"#main/input/job-{job.jobid}\")\n input_files.extend(\n f\"#main/job-{dep.jobid}/output_files\" for dep in dag.dependencies[job]\n )\n\n cwl[\"in\"][\"input_files\"] = {\"source\": input_files, \"linkMerge\": \"merge_flattened\"}\n\n return cwl",
"def convertJobData(dataPath, jd, ap):\n\tif os.path.isfile(os.path.join(dataPath, 'jobData.py')):\n\t\tsys.path.append(dataPath)\n\t\timport jobData\n\t\treloadModule(jobData)\n\t\tsys.path.remove(dataPath)\n\n\t\t# Units settings\n\t\ttry:\n\t\t\tjd.set_attr('units', 'linear', jobData.unit)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('units', 'angle', jobData.angle)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('units', 'time', jobData.timeFormat)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('units', 'fps', jobData.fps)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\t# Time settings\n\n\t\t# App versions\n\t\ttry:\n\t\t\tjd.set_attr('apps', 'Maya', convertAppExecPath('Maya', jobData.mayaVersion, ap))\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('apps', 'Mudbox', convertAppExecPath('Mudbox', jobData.mudboxVersion, ap))\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('apps', 'Mari', convertAppExecPath('Mari', jobData.mariVersion, ap))\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('apps', 'Nuke', convertAppExecPath('Nuke', jobData.nukeVersion, ap))\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('apps', 'RealFlow', convertAppExecPath('RealFlow', jobData.realflowVersion, ap))\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('apps', 'HieroPlayer', convertAppExecPath('HieroPlayer', jobData.hieroPlayerVersion, ap))\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\t# Other settings\n\t\ttry:\n\t\t\tjd.set_attr('other', 'prodboard', jobData.prodBoard)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('other', 'projtools', jobData.projectTools)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tjd.set_attr('other', 'elementslib', jobData.elementsLibrary)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\t# Save XML\n\t\tif jd.save():\n\t\t\tverbose.message(\"Successfully converted legacy job data to XML.\")\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\telse:\n\t\tverbose.print_(\"Cannot convert settings: job data not found.\", 4)\n\t\treturn False",
"def run_job(playerID, rF2root, job, config):\n _j = Job(job, config)\n # read the file to be edited\n try:\n _j.read_json_file_to_be_edited()\n # do the edits\n try:\n _edit_count = _j.run_edits()\n if _edit_count:\n # if successful:\n # backup 'filepath'\n # save new contents to 'filepath\n _report = _j.backup_file()\n _j.write()\n else:\n _report = ''\n return _report\n except (KeyError, ValueError, EmptyJsonError) as e:\n raise JobFailedError\n except JsonContentError:\n raise FileNotFoundError",
"def parse_job(\n source, pedantic=None, filename=None, _debug_python_based_parser=False\n):\n return parse_for_object(source, JobFile, pedantic, filename,\n _debug_python_based_parser)",
"def process_datasets(self):\n\n with open(self.mappings, \"r+\") as json_file:\n emsl_to_jgi = json.load(json_file)\n emsl_to_jgi_copy = copy.deepcopy(emsl_to_jgi)\n\n contaminant_file_loc = emsl_to_jgi[\"contaminant_file_loc\"]\n # run for each dataset\n for dataset_id, values in emsl_to_jgi.items():\n if dataset_id not in [\n \"contaminant_file_loc\",\n \"analysis_activity_file_loc\",\n \"data_objects_file_loc\",\n \"STUDY\",\n \"tools_used\",\n ]:\n raw_file_loc = values[\"raw_file_loc\"]\n self.dataset_name = values[\"dataset_name\"]\n # dataset search against a fasta file\n for genome_directory, locations in values[\n \"genome_directory\"\n ].items():\n # clear object to prepare next job\n ANALYSIS_JOBS_OBJECT.clear()\n\n # create log_dir\n self.save_job_results = os.path.join(\n self.result_loc, dataset_id, genome_directory\n )\n self.log_collected_at = os.path.join(\n os.path.abspath(self.save_job_results), \"analysis_jobs_logs\"\n )\n if not os.path.exists(self.log_collected_at):\n os.makedirs(self.log_collected_at)\n\n files = [locations[\"faa_file_loc\"], contaminant_file_loc]\n contaminated_faa_file_loc = self.contaminate_fasta(files)\n\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"contaminated_faa_file_loc\",\n contaminated_faa_file_loc,\n emsl_to_jgi_copy,\n )\n # convert .faa to .txt\n faa_txt_file = self.convert_faa2txt(\n dataset_id, contaminated_faa_file_loc\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"txt_faa_file_loc\",\n faa_txt_file,\n emsl_to_jgi_copy,\n )\n\n # log & run job\n self.run_n_log_job(\n dataset_id,\n genome_directory,\n contaminated_faa_file_loc,\n raw_file_loc,\n emsl_to_jgi_copy,\n )\n\n # merge analysis\n resultant_file = self.merge_analysis_jobs(\n dataset_id, genome_directory\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"resultant_file_loc\",\n resultant_file,\n emsl_to_jgi_copy,\n )\n\n # capture the job metadata object\n logger.info(\"Jobrun\", extra=LOGGED_ANALYSIS_JOB)\n\n # update emsl_to_jgi.json\n json_file.seek(0) # move back to BOF.\n json_file.truncate()\n json_file.write(json.dumps(emsl_to_jgi_copy, default=str, indent=4))\n pass",
"def edit_job_file(job_file_name, out_file_name, edits):\n o_job = JsonJobsFile()\n o_job._raw_read(job_file_name)\n o_job._edit_job_file(edits)\n o_job.write_as(out_file_name)",
"def parse_composite_geo(self,\n label: str,\n job: 'JobAdapter',\n ) -> bool:\n logger.debug(f'parsing composite geo for {job.job_name}')\n freq_ok = False\n if job.job_status[1]['status'] == 'done':\n self.species_dict[label].final_xyz = parser.parse_xyz_from_file(path=job.local_path_to_output_file)\n self.output[label]['job_types']['composite'] = True\n self.output[label]['job_types']['opt'] = True\n self.output[label]['job_types']['sp'] = True\n if self.job_types['fine']:\n self.output[label]['job_types']['fine'] = True # all composite jobs are fine if fine was asked for\n self.output[label]['paths']['composite'] = os.path.join(job.local_path_to_output_file)\n if self.composite_method is not None:\n self.species_dict[label].opt_level = self.composite_method.simple()\n rxn_str = ''\n if self.species_dict[label].is_ts:\n rxn_str = f' of reaction {self.species_dict[label].rxn_label}' \\\n if self.species_dict[label].rxn_label is not None else ''\n logger.info(f'\\nOptimized geometry for {label}{rxn_str} at {job.level.simple()}:\\n'\n f'{xyz_to_str(xyz_dict=self.species_dict[label].final_xyz)}\\n')\n plotter.save_geo(species=self.species_dict[label], project_directory=self.project_directory)\n if not job.is_ts:\n plotter.draw_structure(species=self.species_dict[label],\n project_directory=self.project_directory)\n else:\n # for TSs, only use `draw_3d()`, not `show_sticks()` which gets connectivity wrong:\n plotter.draw_structure(species=self.species_dict[label],\n project_directory=self.project_directory,\n method='draw_3d')\n frequencies = parser.parse_frequencies(job.local_path_to_output_file, job.job_adapter)\n freq_ok = self.check_negative_freq(label=label, job=job, vibfreqs=frequencies)\n if freq_ok:\n # Update restart dictionary and save a restart file:\n self.save_restart_dict()\n success = True # run freq / scan jobs on this optimized geometry\n if not self.species_dict[label].is_ts:\n is_isomorphic = self.species_dict[label].check_xyz_isomorphism(\n allow_nonisomorphic_2d=self.allow_nonisomorphic_2d)\n if is_isomorphic:\n self.output[label]['isomorphism'] += 'composite passed isomorphism check; '\n else:\n self.output[label]['isomorphism'] += 'composite did not pass isomorphism check; '\n success &= is_isomorphic\n if success:\n self.check_rxn_e0_by_spc(label)\n return success\n elif not self.species_dict[label].is_ts and self.trsh_ess_jobs:\n self.troubleshoot_negative_freq(label=label, job=job)\n if job.job_status[1]['status'] != 'done' or (not freq_ok and not self.species_dict[label].is_ts):\n self.troubleshoot_ess(label=label, job=job, level_of_theory=job.level)\n return False # return ``False``, so no freq / scan jobs are initiated for this unoptimized geometry",
"def input_config_to_job_input(input_batch_id, job_name, job_level, input_config):\n JobInput = namedtuple(\n \"JobInput\",\n [\"input_manifest_s3_uri\", \"label_attribute_name\", \"label_category_s3_uri\"],\n )\n\n input_manifest_s3_uri = input_config.get(\"inputManifestS3Uri\")\n if input_manifest_s3_uri is not None:\n return JobInput(\n input_manifest_s3_uri=input_manifest_s3_uri,\n label_attribute_name=None,\n label_category_s3_uri=None,\n )\n\n chain_to_job_name = job_name\n chain_from_job_name = input_config[\"chainFromJobName\"]\n\n # Only support jobs within the current batch for now.\n if job_level == 1:\n raise Exception(\"can't chain in job_level 1\")\n\n batches = chainable_batches(input_batch_id, job_level)\n if len(batches) == 0:\n raise Exception(\"no chainable batches found\")\n\n processed_job_level_batch = next(\n iter(\n db.get_batch_metadata_by_labeling_job_name(\n chain_to_job_name, BatchMetadataType.PROCESS_LEVEL\n )\n ),\n None,\n )\n\n prev_level_jobs = []\n for batch in batches:\n prev_level_jobs += db.get_child_batch_metadata(\n batch[\"BatchId\"], BatchMetadataType.JOB_LEVEL\n )\n\n for job in prev_level_jobs:\n if job[BatchMetadataTableAttributes.LABELING_JOB_NAME] == chain_from_job_name:\n # If available, use the downsampled manifest file as input to the new job\n if processed_job_level_batch:\n processed_data_location = processed_job_level_batch[\n BatchMetadataTableAttributes.JOB_INPUT_LOCATION\n ]\n else:\n processed_data_location = None\n\n batch_output_location = (\n processed_data_location or job[BatchMetadataTableAttributes.JOB_OUTPUT_LOCATION]\n )\n\n return JobInput(\n input_manifest_s3_uri=batch_output_location,\n label_attribute_name=job[BatchMetadataTableAttributes.LABEL_ATTRIBUTE_NAME],\n label_category_s3_uri=job[BatchMetadataTableAttributes.LABEL_CATEGORY_CONFIG],\n )\n\n raise Exception(f\"chain job {chain_from_job_name} not found\")",
"def update_job(self, job_id, end, status, output_path):\n\n # Close the DB connections\n django.db.connection.close()\n\n try:\n Job.objects.filter(id=job_id).update(\n end=end,\n status=status\n )\n\n qas = list()\n\n for product in glob.glob(output_path):\n qa = self.create_qa_bulk(product, job_id)\n if not qa:\n logger.warning('Error to create QA: {}'.format(product))\n continue\n\n qas.append(qa)\n\n QA.objects.bulk_create(qas)\n\n logger.info('Job {} updated.'.format(job_id))\n except Exception as err:\n logger.error('Job {} failed.'.format(job_id))\n logger.error(err)",
"def handleChainedProcessing(self):\n self.logger.info(\"Handling chained processing job\")\n # first, create an instance of TrivialFileCatalog to override\n tfc = TrivialFileCatalog()\n # check the jobs input files\n inputFile = (\"../%s/%s.root\" % (self.step.data.input.inputStepName,\n self.step.data.input.inputOutputModule))\n tfc.addMapping(\"direct\", inputFile, inputFile, mapping_type=\"lfn-to-pfn\")\n tfc.addMapping(\"direct\", inputFile, inputFile, mapping_type=\"pfn-to-lfn\")\n\n fixupFileNames(self.process)\n fixupMaxEvents(self.process)\n self.process.source.fileNames.setValue([inputFile])\n self.process.maxEvents.input.setValue(-1)\n\n tfcName = \"override_catalog.xml\"\n tfcPath = os.path.join(os.getcwd(), tfcName)\n self.logger.info(\"Creating override TFC and saving into '%s'\", tfcPath)\n tfcStr = tfc.getXML()\n with open(tfcPath, 'w') as tfcFile:\n tfcFile.write(tfcStr)\n\n self.step.data.application.overrideCatalog = \"trivialcatalog_file:\" + tfcPath + \"?protocol=direct\"\n\n return",
"def process_job(q):\n del log_msg[:]\n logger.info('Processing Job %s', q.id)\n\n datatype = q.datatype\n input_dir = q.input_dir\n output_dir = q.output_dir\n processor = q.processor\n if datatype.lower() == 'laz':\n block_name = proper_block_name(input_dir)\n elif datatype.lower() == 'ortho':\n block_name = proper_block_name_ortho(input_dir)\n if datatype.lower() == 'laz' or datatype.lower() == 'ortho':\n logger.info('Verifying las tiles in directory...')\n log_msg.append('Verifying las tiles in directory...\\n')\n has_error, remarks = verify_dir(input_dir, datatype.lower())\n\n if has_error:\n assign_status(q, error=True)\n log_msg.append('Error in verify_las/verify_raster!\\n {0} \\n'.format(remarks))\n else:\n logger.info('Renaming tiles...')\n\n logger.info('BLOCK NAME %s', block_name)\n log_msg.append('BLOCK NAME {0}\\n'.format(block_name))\n\n in_coverage, block_uid = find_in_coverage(block_name)\n\n #: Check first if folder or `block_name` is in `Cephgeo_LidarCoverageBlock`\n #: If not found, `output_dir` is not created and data is not processed\n if in_coverage:\n logger.info('Found in Lidar Coverage model %s %s',\n block_name, block_uid)\n log_msg.append('Found in Lidar Coverage model {0} {1}\\n'.format(\n block_name, block_uid))\n\n rename_tiles(input_dir, output_dir, processor,\n block_name, block_uid, q)\n logger.info('Status %s Status Timestamp %s',\n q.status, q.status_timestamp)\n log_msg.append('Status {0} Status Timestamp {1}\\n'.format(\n q.status, q.status_timestamp))\n\n else:\n has_error = True\n logger.error('ERROR NOT FOUND IN MODEL %s %s', block_name, block_uid)\n log_msg.append('ERROR NOT FOUND IN MODEL {0} {1}\\n'.format(block_name, block_uid))\n assign_status(q, error=True)\n # for DEM\n else:\n logger.info('Handler not implemented for type: %s',\n str(q.datatype))\n log_msg.append('Handler not implemented for type: {0}\\n'.format(\n str(q.datatype)))\n assign_status(q, error=True)\n\n paragraph = ''\n for par in log_msg:\n paragraph = paragraph + par\n\n #: Save log messages from renaming tiles to `Automation_AutomationJob.log`\n with PSQL_DB.atomic() as txn:\n new_q = (Automation_AutomationJob\n .update(data_processing_log=paragraph, status_timestamp=datetime.now())\n .where(Automation_AutomationJob.id == q.id))\n new_q.execute()",
"def submit(self, job_parameters):\n # FIX: Don't pass through the real job name. Bilby outputs the job files by whatever this parameter is, that\n # means that names containing special characters will break. Uniqueness is guaranteed by the folder structure\n job_parameters = json.loads(job_parameters)\n job_parameters['name'] = 'bilby'\n\n # Write the job parameters to a file\n json.dump(job_parameters, open(self.job_parameter_file, 'w'))\n\n # Run the job\n return super().submit(job_parameters)",
"def __stage_du_to_pj(self, pilotdata, pilotjob):\n pass",
"def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config",
"def __Job_list__(self):\n # | - __Job_list__\n\n # | - Adding Jobs From Individual Directory List\n if self.indiv_dir_lst is not None:\n for job_i_dir in self.indiv_dir_lst:\n\n rev_dirs, max_rev = self.__revision_list_and_max__(job_i_dir)\n\n print(job_i_dir)\n if rev_dirs:\n\n print(\"rev_dirs:\", rev_dirs)\n\n if self.parse_all_revisions is False:\n\n last_rev_int = np.sort(\n [int(i.split(\"_\")[-1]) for i in rev_dirs])[-1]\n rev_dirs = [\"_\" + str(last_rev_int), ]\n # rev_dirs = [rev_dirs[-1]]\n\n print(\"rev_dirs:\", rev_dirs)\n print(\"IOPSDFJOKIDSIJFIJDSF\")\n\n for rev_i in rev_dirs:\n path_i = os.path.join(job_i_dir, rev_i)\n path_i = os.path.normpath(path_i)\n\n Job_i = Job(\n path_i=path_i,\n job_params_dict=None,\n max_revision=max_rev,\n root_dir=None,\n )\n\n self.Job_list.append(Job_i)\n else:\n print(\"Didn't find any job dirs here:\")\n print(job_i_dir)\n pass\n # __|\n\n # | - Adding Jobs From Enumerated Job Properties Tree\n if self.job_var_lst is not None:\n for job_i in self.job_var_lst:\n job_var_dict = self.__job_i_vars_to_dict__(job_i)\n\n if self.folders_exist:\n path_i = self.var_lst_to_path(\n job_i,\n job_rev=\"Auto\",\n relative_path=False,\n )\n\n # | - __old__\n # else:\n # print(\"else *s8fs*sdf\")\n # path_i = os.path.join(\n #\n # self.var_lst_to_path(\n # job_i,\n # job_rev=\"Auto\",\n # relative_path=False,\n # ),\n #\n # # self.var_lst_to_path(\n # # job_i,\n # # ),\n #\n # \"_1\",\n # )\n # __|\n\n rev_dirs, max_rev = self.__revision_list_and_max__(\n # path_i\n self.var_lst_to_path(\n job_i,\n job_rev=\"None\",\n relative_path=False,\n )\n )\n\n Job_i = Job(\n path_i=path_i,\n job_params_dict=job_var_dict,\n max_revision=max_rev,\n root_dir=self.root_dir,\n )\n\n self.Job_list.append(Job_i)\n # __|\n\n # | - TEMP | I don't remember why this is here\n indiv_job = self.indiv_job_lst is not None\n level_labels = self.tree_level_labels is not None\n if indiv_job and level_labels:\n print(\"LSKDJFKLDS_-09sdfsdfs9dfas\")\n for job_params_i in self.indiv_job_lst:\n\n job_var_lst_i = self.__job_i_param_dict_to_job_var_lst__(\n job_params_i,\n )\n\n path_i = os.path.join(\n self.new_var_lst_to_path(job_var_lst_i),\n \"_1\",\n )\n\n Job_i = Job(\n path_i=path_i,\n job_params_dict=job_params_i,\n max_revision=None,\n root_dir=self.root_dir,\n )\n\n self.Job_list.append(Job_i)\n # __|\n\n if self.indiv_job_dict_lst is not None:\n self.create_Jobs_from_dicts_and_paths(\n self.indiv_job_dict_lst,\n )\n # __|",
"def main():\n LESSONS_PATH = os.path.join(LESSON_LOCATOR_DATA, LESSON_SETS[0])\n ORIGINAL_LESSONS_PATH = os.path.join(LESSONS_PATH, \"original\")\n ANNOTATED_LESSONS_PATH = os.path.join(LESSONS_PATH, \"annotated\")\n\n if not os.path.exists(ANNOTATED_LESSONS_PATH):\n os.mkdir(ANNOTATED_LESSONS_PATH)\n\n print(\"Scanning original lessons in %s...\" % ORIGINAL_LESSONS_PATH)\n\n for item in os.listdir(ORIGINAL_LESSONS_PATH):\n if item == \".DS_Store\": continue\n\n print(\" found: %s\" % item)\n\n item_path = os.path.join(ORIGINAL_LESSONS_PATH, item)\n\n lesson_number = None\n lesson_description = None\n mobj = re.search(r'^AY\\s+(\\d+)\\s*-\\s*(.+)\\.txt$', item)\n if mobj:\n lesson_number = mobj.group(1)\n lesson_description = mobj.group(2)\n\n print(\" number: %s\" % lesson_number)\n print(\" description: %s\" % lesson_description)\n\n lesson = dict()\n lesson['number'] = lesson_number\n lesson['description'] = lesson_description\n\n fh = open(item_path)\n lesson_raw_text = fh.read()\n fh.close()\n lesson_text = re.split(r'\\n', lesson_raw_text)\n# lesson_raw_text_reencoded = lesson_raw_text.decode('mac-roman').encode('utf-8')\n# lesson_text = re.split(r'\\n', lesson_raw_text_reencoded)\n\n lesson['text'] = lesson_text\n lesson['parsed'] = parseLesson(lesson_text)\n\n if lesson['parsed']['end_of_lesson'] is None:\n print(\" lesson has no 'end of lesson' marker\")\n\n lesson_json = json.dumps(lesson, indent=4)\n annotated_lesson_path = os.path.join(ANNOTATED_LESSONS_PATH, \"ay_%04d.json\" % int(lesson_number))\n fh = open(annotated_lesson_path, \"w\")\n fh.write(lesson_json)\n fh.close()\n\n else:\n print(\"ERROR: File name not understood: %s\" % item)\n\n return 0",
"def upgrade_to_7():\n\n # The infrastructure runs this upgrade script before populating manifests.\n # For this reason, this one-time script does NOT pull manifests to do the input-name mapping, instead relying on a hard-coded alg name -> input name map.\n # If you have other gears in your system at the time of upgrade, you must add that mapping here.\n input_name_for_gear = {\n 'dcm_convert': 'dicom',\n 'qa-report-fmri': 'nifti',\n 'dicom_mr_classifier': 'dicom',\n }\n\n jobs = config.db.jobs.find({'input': {'$exists': True}})\n\n for job in jobs:\n gear_name = job['algorithm_id']\n input_name = input_name_for_gear[gear_name]\n\n # Move single input to named input map\n input_ = job['input']\n input_.pop('filehash', None)\n inputs = { input_name: input_ }\n\n # Destination is required, and (for these jobs) is always the same container as the input\n destination = copy.deepcopy(input_)\n destination.pop('filename', None)\n\n config.db.jobs.update_one(\n {'_id': job['_id']},\n {\n '$set': {\n 'inputs': inputs,\n 'destination': destination\n },\n '$unset': {\n 'input': ''\n }\n }\n )",
"def createArchiverDoc(job, version=None):\n\n job_id = job[\"id\"]\n fwjr = job['doc'][\"fwjr\"]\n jobtype = job['doc'][\"jobtype\"]\n jobstate = job['doc']['jobstate']\n create_ts = job['doc']['timestamp']\n newfwjr = convertToArchiverFormat(fwjr)\n\n fArrayRef = {}\n fArray = {}\n for fileType in WMARCHIVE_FILE_REF_KEY:\n fArrayRef[fileType] = set()\n fArray[fileType] = set()\n\n createFileArrayRef(newfwjr, fArrayRef)\n\n for fileType in WMARCHIVE_FILE_REF_KEY:\n fArrayRef[fileType] = list(fArrayRef[fileType])\n\n createFileArray(newfwjr, fArray, fArrayRef)\n\n for fileType in WMARCHIVE_FILE_REF_KEY:\n fArray[fileType] = list(fArray[fileType])\n\n changeToFileRef(newfwjr, fArray, fArrayRef)\n\n # convert to fwjr format\n\n for fileType in WMARCHIVE_FILE_REF_KEY:\n newfwjr[\"%sArrayRef\" % fileType] = fArrayRef[fileType]\n newfwjr[\"%sArray\" % fileType] = fArray[fileType]\n\n if version is None:\n # add this trry to remove the dependency on WMCore code.\n import WMCore\n version = WMCore.__version__\n # append meta data in fwjr\n wnName = \"\"\n if \"WorkerNodeInfo\" in fwjr:\n wnName = fwjr[\"WorkerNodeInfo\"].get(\"HostName\", \"\")\n\n newfwjr['meta_data'] = {'agent_ver': version,\n 'host': socket.gethostname().lower(),\n 'wn_name': wnName,\n 'fwjr_id': job_id,\n 'jobtype': jobtype,\n 'jobstate': jobstate,\n 'ts': create_ts\n }\n return newfwjr",
"def run_job(args):\n\n global stop_all\n global jobfiles_global\n global jobwcl\n\n jobwcl = WCL()\n jobfiles = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n jobfiles_global = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n\n jobstart = time.time()\n with open(args.config, 'r') as wclfh:\n jobwcl.read(wclfh, filename=args.config)\n jobwcl['verify_files'] = miscutils.checkTrue('verify_files', jobwcl, False)\n jobwcl['jobroot'] = os.getcwd()\n jobwcl['job_max_usage'] = 0\n #jobwcl['pre_job_disk_usage'] = pfwutils.diskusage(jobwcl['jobroot'])\n jobwcl['pre_job_disk_usage'] = 0\n\n # Save pointers to archive information for quick lookup\n if jobwcl[pfwdefs.USE_HOME_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT] != 'never':\n jobwcl['home_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.HOME_ARCHIVE]]\n else:\n jobwcl['home_archive_info'] = None\n\n if jobwcl[pfwdefs.USE_TARGET_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT] != 'never':\n jobwcl['target_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.TARGET_ARCHIVE]]\n else:\n jobwcl['target_archive_info'] = None\n\n # run the tasks (i.e., each wrapper execution)\n stop_all = miscutils.checkTrue('stop_on_fail', jobwcl, True)\n\n try:\n jobfiles['infullnames'] = gather_initial_fullnames()\n jobfiles_global['infullnames'].extend(jobfiles['infullnames'])\n miscutils.coremakedirs('log')\n miscutils.coremakedirs('outputwcl')\n exitcode, jobfiles = job_workflow(args.workflow, jobfiles, jobwcl)\n except Exception:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n print \"Aborting rest of wrapper executions. Continuing to end-of-job tasks\\n\\n\"\n\n try:\n create_junk_tarball(jobwcl, jobfiles, exitcode)\n except:\n print \"Error creating junk tarball\"\n # if should transfer at end of job\n if jobfiles['output_putinfo']:\n print \"\\n\\nCalling file transfer for end of job (%s files)\" % \\\n (len(jobfiles['output_putinfo']))\n\n copy_output_to_archive(jobwcl, jobfiles, jobfiles['output_putinfo'], 'job',\n 'job_output', exitcode)\n else:\n print \"\\n\\n0 files to transfer for end of job\"\n if miscutils.fwdebug_check(1, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"len(jobfiles['outfullnames'])=%s\" % \\\n (len(jobfiles['outfullnames'])))\n print \"\\nDESDMTIME: pfwrun_job %0.3f\" % (time.time()-jobstart)\n return exitcode",
"def processjob(self, job):\n self.model.add_documents(job)\n self.jobsdone += 1\n if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:\n fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')\n self.model.save(fname)",
"def ConcatenatePipelineSettingsToJob( jobInfoPath, batchName ):\n global submissionInfo\n jobWriterPath = os.path.join( submissionInfo[\"RepoDirs\"][\"submission/Integration/Main\"], \"JobWriter.py\" )\n scenePath = NodegraphAPI.GetSourceFile()\n argArray = [\"-ExecuteScript\", jobWriterPath, \"Katana\", \"--write\", \"--scene-path\", scenePath, \"--job-path\", jobInfoPath, \"--batch-name\", batchName]\n CallDeadlineCommand( argArray, False )",
"def load_from_jobpath(self, job_id=None, db_entry=None, convert_to_object=True):\n return self._project.load_from_jobpath(\n job_id=job_id, db_entry=db_entry, convert_to_object=convert_to_object\n )",
"def jobs(\n ctx: typer.Context,\n op_id: str = typer.Argument(\n ...,\n autocompletion=completion_op_id,\n callback=check_for_op_id,\n help=\"A valid op-id. e.g. get_markets_prices\",\n ),\n param_string: Optional[str] = typer.Option(\n None,\n \"--param-string\",\n \"-p\",\n help=\"Optional. Full or partial parameters as a json encoded dictionary string. \"\n \"Keys must be valid parameters for selected op_id.\",\n ),\n default_params: bool = typer.Option(\n False,\n \"-d\",\n \"--default-params\",\n help=\"Include all parameters that are required, or have default values. \"\n \"Missing values will be 'NOTSET'.\",\n ),\n callback_path: Optional[Path] = typer.Option(\n None,\n \"-c\",\n \"--callbacks\",\n help=\"Optional. Path to custom callbacks to be used. \",\n ),\n file_name: str = typer.Option(\n \"created-jobs/${esi_job_op_id}-${esi_job_uid}\",\n \"-n\",\n \"--file-name\",\n help=(\n \"File name for the new job, must be unique if multiple jobs. \"\n \"Can include directories, \"\n \"and the file type suffix will be added based on --format-id.\"\n ),\n ),\n data_path: Optional[Path] = typer.Option(\n None,\n \"--data-file\",\n \"-i\",\n help=(\n \"Optional. Path to json, csv, or yaml file with full or partial parameters. \"\n \"Must result in a list of dicts.\"\n ),\n ),\n format_id: FormatChoices = typer.Option(\n FormatChoices.json,\n \"-f\",\n \"--format-id\",\n show_choices=True,\n help=\"Output file format.\",\n ),\n path_out: Path = typer.Argument(\n \"./tmp\",\n help=\"Parent path for saving the new jobs, will be prepended to --file-name.\",\n ),\n):\n operation_manifest: OperationManifest = ctx.obj[\"operation_manifest\"]\n # path_out = optional_object(path_out, Path, \".\")\n if path_out.is_file:\n typer.BadParameter(\"path_out must not be a file.\")\n file_data: Optional[List[Dict]] = get_params_from_file(data_path)\n parameters: Dict = decode_param_string(param_string)\n if callback_path is None:\n callback_collection = default_callback_collection()\n else:\n callback_collection = load_callbacks(callback_path)\n jobs_: List[EsiJob] = []\n try:\n op_info = operation_manifest.op_info(op_id)\n if not file_data:\n job = op_info.create_job(\n parameters,\n callback_collection,\n include_default_params=default_params,\n # only_required_default_params=False,\n # allow_notset=False,\n )\n jobs_.append(job)\n else:\n for params in file_data:\n params.update(parameters)\n job = op_info.create_job(\n params,\n callback_collection,\n include_default_params=default_params,\n # only_required_default_params=False,\n # allow_notset=False,\n )\n jobs_.append(job)\n except Exception as ex:\n raise typer.BadParameter(\n f\"Exception creating job. {ex.__class__.__name__}: {ex}\"\n )\n for job in jobs_:\n file_path = resolve_job_file_path(job, file_name, path_out)\n try:\n save_path = job.serialize_file(file_path, format_id)\n except Exception as ex:\n raise typer.BadParameter(\n f\"Error saving job to {save_path}. {ex.__class__.__name__}, {ex}\"\n )\n logger.info(\"Saved job %s at %s\", job.uid, file_path)\n typer.echo(f\"{len(jobs_)} jobs saved to {path_out}\")\n report_finished_task(ctx)",
"def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job",
"def modify_job(filename, batch_name, node_choice, ppn_list, qe_switch, atoms):\n\tfin = open(filename, \"r\"); file = fin.read(); fin.close()\n\t# fin_head = open(\"/home/twchang/bin/others/job.sh-mod\", 'r').read()\n\tfin_head = Job.job_head\n\tnode_str = \"+\".join([\"node{:02d}:ppn={}\".format(node_choice[i], ppn_list[i]) for i in range(len(ppn_list))])\n\ttail = re.search(r\".*(\\s*-np.*)\", file, re.S).group(1).strip()\n\ttail = re.sub(r\"\\s*>>\\s*out\", r\"\", tail)\n\tif qe_switch:\n\t\ttail = re.sub(r\"(NPROCS\\s*).*(/bin/)\", r\"\\g<1>/data2/twchang/opt/q-e-qe-6.4.1\\g<2>\", tail, re.S)\n\telse: # vasp mode\n\t\ttail = re.sub(r\"(NPROCS\\s*).*(/bin/).*(\\n)\", r\"\\g<1>/home/twchang\\g<2>vasp_noncol \\g<3>\", tail, re.S)\n\tfile = fin_head + \" \" + tail\n\tfile = re.sub(r\"(#PBS\\s+-N\\s+).*\", r\"\\g<1>{}\".format(batch_name), file) # if batch_name != \"\" else file # modify batch_name\n\tfile = re.sub(r\"(#PBS\\s+-l\\s+nodes=).*\", r\"\\g<1>{}\".format(node_str), file) # modify ppn\n\tfile = re.sub(r\"(-np\\s+\\$NPROCS\\s+.*\\s+<).*?(\\..*>\\s+).*?(\\..*)\", r\"\\g<1>{}\\g<2>{}\\g<3>\".format(atoms, atoms), file) if atoms else file\n\tfile = re.sub(r\"/data2/twchang/q-e-qe-6\\.1\\.0/bin\", r\"/data2/twchang/opt/q-e-qe-6.4.1/bin\", file)# if qe_switch else file\n\tfout = open(filename, \"w\"); fout.write(file)",
"def image_resize(job_object):\n try:\n job = json.loads(job_object.arg)\n base64_file = job['image']\n args = job['args'] if 'args' in job else {}\n del job['image']\n logging.info(job)\n \n def write_file(local_path,filename,file_b64):\n logging.debug(\"about to save to \" + \"%s/%s\" % (local_path,filename))\n if not os.path.exists(local_path): os.makedirs(local_path)\n image_file = base64.b64decode(file_b64)\n local_file = open(\"%s/%s\" % (local_path,filename), \"w\")\n local_file.write(image_file)\n local_file.close()\n \n def download_file(url,local_path,filename):\n print \"downloading \" + url\n f = urllib2.urlopen(urllib2.Request(url))\n print \"about to save to \" + \"%s/%s\" % (local_path,filename)\n if not os.path.exists(local_path): os.makedirs(local_path)\n # Open our local file for writing\n local_file = open(\"%s/%s\" % (local_path,filename), \"w\")\n local_file.write(f.read())\n local_file.close()\n \n local_path = '%s/upload/%s' % (options.asset_root,job['path'])\n local_path_wfile = '%s/%s%s' % (local_path,job['file'],job['extension'])\n filename = '%s%s' % (job['file'],job['extension'])\n #download_file(job['url'],local_path,filename)\n write_file(local_path,filename,base64_file)\n \n def resize_and_save(local_file,new_file,maxsize=None,maxh=None,maxw=None,crop=None):\n \"\"\"Resize the image and save\"\"\"\n logging.debug(\"maxw = %s, maxsize=%s, crop=%s\" % (maxw,maxsize,crop))\n img = Image.open(local_file)\n width,height = img.size\n width,height = float(width), float(height)\n ratio = float(1)\n if crop is not None:\n size = float(maxsize)\n if width <= height and width > size:\n ratio = size/width\n elif height < width and height > size:\n ratio = size/height\n else: \n ratio = 1 # too small\n elif maxsize:\n size = float(maxsize)\n if width >= height and width > size:\n ratio = size/width\n elif height > width and height > size:\n ratio = size/height\n else: \n ratio = 1 # too small\n elif maxh:\n size = maxh\n if height > size:\n ratio = size/height\n else:\n # too small\n ratio = 1\n elif maxw:\n size = maxw\n if width > size:\n ratio = size/width\n else:\n # too small\n ratio = 1\n else:\n raise Exception(\"must specify max width, OR max size\")\n \n print(\"old: ratio = %s: size(x,y) = %s,%s\" % (ratio,width,height))\n height = int(height*ratio)\n width = int(width*ratio)\n print(\"new ratio = %s: size(x,y) = %s,%s\" % (ratio,width,height))\n img = img.resize((width, height),Image.ANTIALIAS)\n if crop is not None:\n log.debug(\"in crop %s\" % crop)\n crop = int(crop)\n if width > crop:\n amt = int((int(width) - crop)/2)\n img = img.crop((amt,0,amt + crop, crop))\n elif height > crop:\n amt = int((int(height) - crop)/2)\n img = img.crop((0,amt,crop,amt+crop))\n \n log.debug(\"saving new file %s\" % new_file)\n if img.mode != \"RGB\":\n img = img.convert(\"RGB\")\n img.save(new_file)\n \n \n if os.path.exists(local_path_wfile):\n if args != {}:\n ext = args['extension'] if 'extension' in args else \"_t\"\n resize_and_save(local_path_wfile,\n '%s/%s%s.jpg' % (local_path,job['file'],ext),\n maxsize=args['maxsize'],\n crop=args['crop'])\n else:\n resize_and_save(local_path_wfile,'%s/%s_t.jpg' % (local_path,job['file']),maxsize=100)\n resize_and_save(local_path_wfile,'%s/%s_m.jpg' % (local_path,job['file']),maxw=317)\n resize_and_save(local_path_wfile,'%s/%s_l.jpg' % (local_path,job['file']),maxsize=800)\n keeptrying = False\n else:\n logging.error(\"haven't found file? 
%s\" % local_path_wfile)\n \n # delete original\n logging.debug(\"About to delete original %s\" % local_path_wfile)\n os.remove(local_path_wfile)\n \n except:\n traceback.print_exc()",
"def __prepare_job(self, job_wrapper, job_destination):\n command_line = None\n client = None\n remote_job_config = None\n compute_environment = None\n try:\n client = self.get_client_from_wrapper(job_wrapper)\n tool = job_wrapper.tool\n remote_job_config = client.setup(tool.id, tool.version)\n rewrite_parameters = LwrJobRunner.__rewrite_parameters( client )\n prepare_kwds = {}\n if rewrite_parameters:\n compute_environment = LwrComputeEnvironment( client, job_wrapper, remote_job_config )\n prepare_kwds[ 'compute_environment' ] = compute_environment\n job_wrapper.prepare( **prepare_kwds )\n self.__prepare_input_files_locally(job_wrapper)\n remote_metadata = LwrJobRunner.__remote_metadata( client )\n dependency_resolution = LwrJobRunner.__dependency_resolution( client )\n metadata_kwds = self.__build_metadata_configuration(client, job_wrapper, remote_metadata, remote_job_config)\n remote_command_params = dict(\n working_directory=remote_job_config['working_directory'],\n metadata_kwds=metadata_kwds,\n dependency_resolution=dependency_resolution,\n )\n remote_working_directory = remote_job_config['working_directory']\n # TODO: Following defs work for LWR, always worked for LWR but should be\n # calculated at some other level.\n remote_job_directory = os.path.abspath(os.path.join(remote_working_directory, os.path.pardir))\n remote_tool_directory = os.path.abspath(os.path.join(remote_job_directory, \"tool_files\"))\n container = self._find_container(\n job_wrapper,\n compute_working_directory=remote_working_directory,\n compute_tool_directory=remote_tool_directory,\n compute_job_directory=remote_job_directory,\n )\n command_line = build_command(\n self,\n job_wrapper=job_wrapper,\n container=container,\n include_metadata=remote_metadata,\n include_work_dir_outputs=False,\n remote_command_params=remote_command_params,\n )\n except Exception:\n job_wrapper.fail( \"failure preparing job\", exception=True )\n log.exception(\"failure running job %d\" % job_wrapper.job_id)\n\n # If we were able to get a command line, run the job\n if not command_line:\n job_wrapper.finish( '', '' )\n\n return command_line, client, remote_job_config, compute_environment",
"def __init__(self, job, project=None):\n self.job = job\n self.build_definition = job.build_definition\n if project:\n self.project = project\n else:\n self.project = self.job.project\n self.current_index = -1\n self.plan = []"
] | [
"0.59139556",
"0.58567005",
"0.5696271",
"0.5631622",
"0.5470596",
"0.52659386",
"0.52299416",
"0.5168686",
"0.49637362",
"0.49448887",
"0.49375767",
"0.48854095",
"0.48788068",
"0.48770913",
"0.4875025",
"0.48702103",
"0.4839494",
"0.48336488",
"0.48224968",
"0.47955766",
"0.47870493",
"0.47827575",
"0.4777441",
"0.47629094",
"0.47607163",
"0.4759288",
"0.47580752",
"0.4739117",
"0.47390223",
"0.4733595"
] | 0.5963013 | 0 |
Convert objects in a Galaxy history into a CWL object. Useful in running conformance tests and implementing the cwl-runner interface via Galaxy. | def output_to_cwl_json(galaxy_output, get_metadata, get_dataset):
    def element_to_cwl_json(element):
        element_output = GalaxyOutput(
            galaxy_output.history_id,
            element["object"]["history_content_type"],
            element["object"]["id"],
        )
        return output_to_cwl_json(element_output, get_metadata, get_dataset)

    output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)
    if output_metadata["history_content_type"] == "dataset":
        ext = output_metadata["file_ext"]
        assert output_metadata["state"] == "ok"
        dataset_dict = get_dataset(output_metadata)
        if ext == "expression.json":
            if "content" in dataset_dict:
                return json.loads(dataset_dict["content"])
            else:
                with open(dataset_dict["path"]) as f:
                    return json.load(f)
        else:
            return output_properties(**dataset_dict)
    elif output_metadata["history_content_type"] == "dataset_collection":
        if output_metadata["collection_type"] == "list":
            rval = []
            for element in output_metadata["elements"]:
                rval.append(element_to_cwl_json(element))
        elif output_metadata["collection_type"] == "record":
            rval = {}
            for element in output_metadata["elements"]:
                rval[element["element_identifier"]] = element_to_cwl_json(element)
        return rval
    else:
        raise NotImplementedError("Unknown history content type encountered") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __build_history(self, obj: Object) -> dict:\n previous_history = obj.history\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}",
"def __build_history(self, obj: Object) -> dict:\n previous_history = dict(obj.history)\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}",
"def rc_to_obj(self, goal):\n channels = self.depth_heading_rc(goal)\n channels[self.xchannel] = goal.x_rc_vel\n yrc_cmd = self.get_obj_y(goal, True)\n channels[self.ychannel] = yrc_cmd\n return channels",
"def instantiate_history(self):\n serialized_history = self.cache.get('history')\n history = ast.literal_eval(serialized_history.decode('utf-8'))\n return history",
"def rc_to_obj(self, goal):\n channels = self.depth_heading_rc(goal)\n\n # move to object\n yrc_cmd = self.get_obj_y(goal, False)\n zrc_cmd = self.get_obj_z(goal)\n\n # an integrated controller\n self.depth_I += (zrc_cmd - self.pwm_center) * 0.1 # 1/10 Hz\n # limit depth integral\n if abs(self.depth_I) > self.depth_Imax:\n if self.depth_I < 0:\n self.depth_I = -self.depth_Imax\n else:\n self.depth_I = self.depth_Imax\n\n channels[self.xchannel] = goal.x_rc_vel\n channels[self.ychannel] = yrc_cmd\n channels[self.zchannel] += self.depth_I\n hout = self.get_heading_pwm(goal)\n return channels",
"def to_legacy(self) -> object:\n pass",
"def generate_library_from_change_history(original_library_version, library_change_history):\n updated_library_version = copy.deepcopy(original_library_version)\n if len(library_change_history['changes']) > 0:\n updated_library_version['date'] = library_change_history['changes'][-1]['date'] # date stamp of most recent change\n for change in library_change_history['changes']:\n for added_playlist in change['added_playlists']:\n updated_library_version['library'].append({'name': added_playlist['name'],\n 'tracks': added_playlist['added_songs']})\n\n for removed_playlist in change['removed_playlists']:\n del updated_library_version['library'][_get_playlist_index_by_name(updated_library_version['library'], removed_playlist['name'])]\n\n for modified_playlist in change['modified_playlists']:\n for playlist in updated_library_version['library']:\n if playlist['name'] == modified_playlist['name']:\n for added_song in modified_playlist['added_songs']:\n playlist['tracks'].append(added_song)\n\n for removed_song in modified_playlist['removed_songs']:\n del playlist['tracks'][_get_song_index_by_name(playlist['tracks'], removed_song['title'])]\n return updated_library_version",
"def output_to_cwl_json(\n galaxy_output, get_metadata, get_dataset, get_extra_files, pseduo_location=False,\n):\n def element_to_cwl_json(element):\n element_output = GalaxyOutput(\n galaxy_output.history_id,\n element[\"object\"][\"history_content_type\"],\n element[\"object\"][\"id\"],\n )\n return output_to_cwl_json(element_output, get_metadata, get_dataset, get_extra_files)\n\n output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)\n\n def dataset_dict_to_json_content(dataset_dict):\n if \"content\" in dataset_dict:\n return json.loads(dataset_dict[\"content\"])\n else:\n with open(dataset_dict[\"path\"]) as f:\n return json.load(f)\n\n if output_metadata[\"history_content_type\"] == \"dataset\":\n ext = output_metadata[\"file_ext\"]\n assert output_metadata[\"state\"] == \"ok\"\n if ext == \"expression.json\":\n dataset_dict = get_dataset(output_metadata)\n return dataset_dict_to_json_content(dataset_dict)\n else:\n file_or_directory = \"Directory\" if ext == \"directory\" else \"File\"\n if file_or_directory == \"File\":\n dataset_dict = get_dataset(output_metadata)\n properties = output_properties(pseduo_location=pseduo_location, **dataset_dict)\n basename = properties[\"basename\"]\n extra_files = get_extra_files(output_metadata)\n found_index = False\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == SECONDARY_FILES_INDEX_PATH:\n found_index = True\n\n if found_index:\n ec = get_dataset(output_metadata, filename=SECONDARY_FILES_INDEX_PATH)\n index = dataset_dict_to_json_content(ec)\n for basename in index[\"order\"]:\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == os.path.join(SECONDARY_FILES_EXTRA_PREFIX, basename):\n ec = get_dataset(output_metadata, filename=path)\n if not STORE_SECONDARY_FILES_WITH_BASENAME:\n ec[\"basename\"] = basename + os.path.basename(path)\n else:\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n if \"secondaryFiles\" not in properties:\n properties[\"secondaryFiles\"] = []\n\n properties[\"secondaryFiles\"].append(ec_properties)\n else:\n basename = output_metadata.get(\"cwl_file_name\")\n if not basename:\n basename = output_metadata.get(\"name\")\n\n listing = []\n properties = {\n \"class\": \"Directory\",\n \"basename\": basename,\n \"listing\": listing,\n }\n\n extra_files = get_extra_files(output_metadata)\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n ec = get_dataset(output_metadata, filename=path)\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n listing.append(ec_properties)\n\n return properties\n\n elif output_metadata[\"history_content_type\"] == \"dataset_collection\":\n if output_metadata[\"collection_type\"] == \"list\":\n rval = []\n for element in output_metadata[\"elements\"]:\n rval.append(element_to_cwl_json(element))\n elif output_metadata[\"collection_type\"] == \"record\":\n rval = {}\n for element in output_metadata[\"elements\"]:\n rval[element[\"element_identifier\"]] = element_to_cwl_json(element)\n return rval\n else:\n raise NotImplementedError(\"Unknown history content type encountered\")",
"def __getState_serializeHistory(history):\n\n # Accumulator for serialized posns\n plyDictList = []\n\n for ply in history:\n # Create dictionary of this ply and append it to accumulator\n plyDict = {'fromRank': ply[0].rankN,\n 'fromFile': ply[0].fileN,\n 'toRank': ply[1].rankN,\n 'toFile': ply[1].fileN}\n plyDictList.append(plyDict)\n\n return plyDictList",
"def example_lchab_to_lchuv():\r\n\r\n print(\"=== Complex Example: LCHab->LCHuv ===\")\r\n # Instantiate an LCHab color object with the given values.\r\n lchab = LCHabColor(0.903, 16.447, 352.252)\r\n # Show a string representation.\r\n print(lchab)\r\n # Convert to LCHuv.\r\n lchuv = convert_color(lchab, LCHuvColor)\r\n print(lchuv)\r\n print(\"=== End Example ===\\n\")",
"def convert_to_model(self, *args):",
"def parseModelHistory(*args):\n return _libsbml.RDFAnnotationParser_parseModelHistory(*args)",
"def get_biosphere_2_3_category_migration_data():\n return {\n \"fields\": [\"categories\", \"type\"],\n \"data\": [\n ((k, \"biosphere\"), {\"categories\": v}) # Exchanges\n for k, v in ECOSPOLD_2_3_BIOSPHERE.items()\n ]\n + [\n ((k, \"emission\"), {\"categories\": v}) # Datasets\n for k, v in ECOSPOLD_2_3_BIOSPHERE.items()\n ],\n }",
"def convert_to_model(self, *args):\n categories_data, *_ = args\n return [Category(**category) for category in categories_data]",
"def history():",
"def _convert_to_object(jsonc_obj):\n\n if isinstance(jsonc_obj, Jsonc):\n plain = {}\n for key, value in jsonc_obj._dict.items():\n plain[key] = _convert_to_object(value)\n return plain\n elif isinstance(jsonc_obj, list):\n plain = []\n for item in jsonc_obj:\n plain.append(_convert_to_object(item))\n return plain\n else:\n return jsonc_obj",
"def object_via_gen_from(self, fit: af.Fit, galaxies: List[Galaxy]) -> object:",
"def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self",
"def create_obj(destination,mtl_name):\r\n\tshutil.copyfile(\"file_cube.obj\",destination)\r\n\tf=open(destination,\"r\")\r\n\tlines=f.readlines()\r\n\tlines[0]=\"mtllib \"+mtl_name+\"\\n\"\r\n\tf.close()\r\n\tf=open(destination,\"w\")\r\n\tf.writelines(lines)\r\n\tf.close()",
"def BoltMotionObjToFeatureObj(all_bolt_data, electrode_pca_dict):\n\n # Store in feature class object\n all_features_obj_dict = dict();\n\n for motion_name in all_bolt_data:\n trial_list = all_bolt_data.get(motion_name)\n print motion_name\n\n feature_list = list()\n # For all objects\n for trial in trial_list:\n\n bolt_feature_obj = extract_features.extract_features(trial, electrode_pca_dict[motion_name])\n\n feature_list.append(bolt_feature_obj)\n\n # Store all of the objects away\n all_features_obj_dict[motion_name] = feature_list\n\n return all_features_obj_dict",
"def __init__(self, *args):\n this = _libsbml.new_ModelHistory(*args)\n try: self.this.append(this)\n except: self.this = this",
"def _convert_to_rdf(self):\n\n job_result = {}\n\n # Add \"/\" at the end of data_namespace if not present.\n vocabulary_namespace = self._namespace_vocabspace_validator(self.vocabulary_namespace)\n data_namespace = self._namespace_vocabspace_validator(self.data_namespace) + self.datasetid + \"/\"\n # Vocabulary prefix\n vocabulary_namespace_prefix = \"losdv\"\n # Data namespace prefix\n data_namespace_prefix = \"losdd\"\n\n # read from json-stat from a url\n source_json = self._get_content()\n _cleanString = self._cleanString\n _prefix_build_concept = self._prefix_build_concept\n\n def conversion_for_old_jstat_version():\n\n scheme = [\n '@prefix qb: <http://purl.org/linked-data/cube#> .'\n '\\n@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .'\n '\\n@prefix skos: <http://www.w3.org/2004/02/skos/core#> .'\n '\\n@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .'\n '\\n@prefix prov: <http://www.w3.org/ns/prov#> .'\n '\\n@prefix dc: <http://purl.org/dc/elements/1.1/> .'\n '\\n@prefix ' + vocabulary_namespace_prefix + ': <' + vocabulary_namespace + '> .\\n@prefix '\n + data_namespace_prefix + ': <' + data_namespace + '> .',\n '\\n@prefix ' + data_namespace_prefix + 'schm: <' + data_namespace + 'conceptscheme/> .']\n\n code_list = ['#CODELIST\\n\\n']\n observations = ['#OBSERVATIONS\\n\\n']\n\n dataset_label = source_json['dataset']['label']\n dataset_source = source_json['dataset']['source']\n dataset_updated = source_json['dataset']['updated']\n dimensions = source_json['dataset']['dimension']\n\n # Building prefix\n\n for data_field_nm in dimensions['id']:\n scheme.append('\\n@prefix ' + data_namespace_prefix + _cleanString(\n data_field_nm) + 'cpt: <' + data_namespace + 'concept/' + _cleanString(data_field_nm) + '/> .')\n\n scheme.append('\\n\\n#SCHEME\\n\\n')\n dataset_values = source_json['dataset']['value']\n n1 = len(dimensions['id'])\n\n # Scheme: Individual terms\n\n try:\n\n for data_field_nm in dimensions['id']:\n scheme.append('' + vocabulary_namespace_prefix + ':' + _cleanString(\n data_field_nm) + ' a qb:ComponentProperty, qb:DimensionProperty ;\\n\\trdfs:label \"' + data_field_nm + '\" ;\\n\\trdfs:range xsd:string .\\n\\n')\n\n scheme.append(\n '' + vocabulary_namespace_prefix + ':value a qb:ComponentProperty, qb:MeasureProperty ;\\n\\trdfs:label \"value\" ;\\n\\trdfs:range xsd:float .\\n\\n')\n\n # Scheme: DSD\n\n scheme.append('' + data_namespace_prefix + ':' + self._cleanString(\n dataset_label) + '_dsd a qb:DataStructureDefinition ;\\n\\tqb:component\\n\\t\\t'\n '[ a qb:ComponentSpecification ;\\n\\t\\t qb:codeList ' +\n data_namespace_prefix + 'schm:measureType ; \\n\\t\\t qb:dimension qb:measureType ;'\n '\\n\\t\\t qb:order 1 \\n\\t] ;\\n\\tqb:component [ qb:measure ' +\n vocabulary_namespace_prefix + ':value ] ;\\n\\t')\n\n for index, data_field_nm in enumerate(dimensions['id']):\n scheme.append(\n 'qb:component\\n\\t\\t[ a qb:ComponentSpecification ;\\n\\t\\t qb:codeList ' + data_namespace_prefix +\n 'schm:' + self._cleanString(\n data_field_nm) + ' ;\\n\\t\\t qb:dimension ' + vocabulary_namespace_prefix + ':' + _cleanString(\n data_field_nm) + ' ;\\n\\t\\t qb:order ' + str(index + 2) + ' \\n\\t\\t] ')\n\n if index == (n1 - 1):\n scheme.append('\\n.\\n\\n')\n else:\n scheme.append(';\\n\\t')\n\n # Scheme: Dataset\n\n scheme.append('' + data_namespace_prefix + ':' + _cleanString(dataset_label) +\n '_dataset a qb:DataSet ;\\n\\tqb:structure ' + data_namespace_prefix + ':' +\n _cleanString(dataset_label) + '_dsd ;\\n\\trdfs:label \"' + \\\n dataset_label + '\" ; 
\\n\\tprov:generatedAtTime \"' + dataset_updated\n + '\"^^xsd:dateTime ;\\n\\tdc:creator \"' + dataset_source + '\" .\\n\\n')\n\n # Generating Codelist\n\n # Codelist: Conceptscheme\n\n for index, data_field_nm in enumerate(dimensions['id']):\n code_list.append('' + data_namespace_prefix + 'schm:' +\n _cleanString(data_field_nm) + ' a skos:ConceptScheme ;\\n\\t')\n\n skos_members = []\n for k in dimensions[data_field_nm]['category']['index'].keys():\n concept = dimensions[data_field_nm]['category']['label'][k]\n\n skos_members.append(\n 'skos:member ' + _prefix_build_concept(data_namespace_prefix, data_field_nm) + _cleanString(\n concept) + ' ')\n\n code_list.append(';\\n\\t'.join(skos_members) + '.\\n\\n')\n\n # Codelist: Concepts\n\n for data_field_nm in dimensions['id']:\n\n for k in dimensions[data_field_nm]['category']['index'].keys():\n concept = dimensions[data_field_nm]['category']['label'][k]\n code_list.append(\n '' + self._prefix_build_concept(data_namespace_prefix, data_field_nm) + self._cleanString(concept) +\n ' a skos:Concept ;\\n\\trdfs:label \"' + concept + '\" .\\n\\n')\n\n # Generating Observations\n\n all_term = []\n\n for data_field_nm in dimensions['id']:\n labels = []\n\n for k in dimensions[data_field_nm]['category']['index'].keys():\n concept = dimensions[data_field_nm]['category']['label'][k]\n labels.append(self._cleanString(concept))\n\n all_term.append(labels)\n\n size = dimensions['size']\n total_size = 1\n tracker = []\n\n for s in size:\n tracker.append(0)\n total_size *= s\n\n track_size = len(tracker)\n\n # Observations: creating each\n\n for t in range(total_size):\n observations.append(data_namespace_prefix + ':' + str(\n uuid.uuid4()) + ' a qb:Observation ;\\n\\tqb:dataSet ' + data_namespace_prefix + ':' +\n _cleanString(dataset_label) + '_dataset ;\\n\\tqb:measureType ' +\n vocabulary_namespace_prefix + ':value ;\\n\\t')\n\n for index, data_field_nm in enumerate(dimensions['id']):\n observations.append('' + vocabulary_namespace_prefix + ':' + _cleanString(data_field_nm) + ' ')\n observations.append(\n '' + _prefix_build_concept(data_namespace_prefix, data_field_nm) + all_term[index][\n tracker[index]] + ' ;\\n\\t')\n\n tracker[track_size - 1] += 1\n\n for i in range(track_size - 1, -1, -1):\n if i != 0:\n if tracker[i] > size[i] - 1:\n tracker[i] = 0\n tracker[i - 1] += 1\n else:\n if tracker[i] > size[i] - 1:\n tracker[i] = 0\n\n observations.append('qb:measureType ' + vocabulary_namespace_prefix + ':value ;\\n\\t' +\n vocabulary_namespace_prefix + ':value \"' +\n str(dataset_values[t]) + '\"^^xsd:float\\n . \\n\\n')\n\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n logger.error(\"Errort status code - Mssg: {}\".format(str(e)))\n job_result['status'] = 500\n job_result['Error'] = str(e)\n job_result['version'] = \"old\"\n job_result['ErrorMessage'] = \"Something went wrong in the parsing json-stat to rdf. 
\" \\\n \"Please ensure json stat in required format\"\n\n return job_result\n\n rdf_content = []\n rdf_content.extend(scheme)\n rdf_content.extend(code_list)\n rdf_content.extend(observations)\n job_result['status'] = 200\n job_result['Error'] = \"None\"\n job_result['version'] = \"old\"\n job_result['SuccessMessage'] = \"RDF file is successfully created\"\n job_result['rdf_content'] = \"\".join(rdf_content)\n\n return job_result\n\n def conversion_for_new_jstat_version():\n\n scheme = [\n '@prefix qb: <http://purl.org/linked-data/cube#> .'\n '\\n@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .'\n '\\n@prefix skos: <http://www.w3.org/2004/02/skos/core#> .'\n '\\n@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .'\n '\\n@prefix prov: <http://www.w3.org/ns/prov#> .'\n '\\n@prefix dc: <http://purl.org/dc/elements/1.1/> .'\n '\\n@prefix ' + vocabulary_namespace_prefix + ': <' + vocabulary_namespace + '> .\\n@prefix '\n + data_namespace_prefix + ': <' + data_namespace + '> .',\n '\\n@prefix ' + data_namespace_prefix + 'schm: <' + data_namespace + 'conceptscheme/> .']\n\n code_list = ['#CODELIST\\n\\n']\n observations = ['#OBSERVATIONS\\n\\n']\n\n dataset_label = source_json['label']\n dataset_source = source_json['source']\n dataset_updated = source_json['updated']\n dimensions = source_json['dimension']\n dataset_values = source_json['value']\n field_nms = source_json['id']\n\n #### Building prefix\n\n for data_field_nm in field_nms:\n scheme.append('\\n@prefix ' + data_namespace_prefix + _cleanString(\n data_field_nm) + 'cpt: <' + data_namespace + 'concept/' + _cleanString(data_field_nm) + '/> .')\n\n scheme.append('\\n\\n#SCHEME\\n\\n')\n\n # Generating Scheme\n\n unit_index = source_json['id'].index('Units')\n n1 = len(field_nms)\n\n try:\n\n # Scheme: Individual terms\n\n for data_field_nm in field_nms:\n scheme.append('' + vocabulary_namespace_prefix + ':' + _cleanString(\n data_field_nm) + ' a qb:ComponentProperty, qb:DimensionProperty ;\\n\\trdfs:label \"' + data_field_nm + '\" ;\\n\\trdfs:range xsd:string .\\n\\n')\n\n scheme.append(\n '' + vocabulary_namespace_prefix + ':value a qb:ComponentProperty, qb:MeasureProperty ;'\n '\\n\\trdfs:label \"value\" ;\\n\\trdfs:range xsd:float .\\n\\n')\n\n # Scheme: DSD\n\n scheme.append('' + data_namespace_prefix + ':' + _cleanString(\n dataset_label) + '_dsd a qb:DataStructureDefinition ;\\n\\tqb:component\\n\\t\\t'\n '[ a qb:ComponentSpecification ;\\n\\t\\t qb:codeList ' +\n data_namespace_prefix + 'schm:measureType ; \\n\\t\\t qb:dimension qb:measureType ;'\n '\\n\\t\\t qb:order 1 \\n\\t] ;\\n\\tqb:component [ qb:measure ' +\n vocabulary_namespace_prefix + ':value ] ;\\n\\t')\n\n for index, data_field_nm in enumerate(field_nms):\n if data_field_nm != 'Units':\n scheme.append('qb:component\\n\\t\\t[ a qb:ComponentSpecification ;\\n\\t\\t qb:codeList '\n + data_namespace_prefix + 'schm:' + _cleanString(data_field_nm) +\n ' ;\\n\\t\\t qb:dimension ' + vocabulary_namespace_prefix + ':' +\n _cleanString(data_field_nm) + ' ;\\n\\t\\t qb:order ' + str(\n index + 2) + ' \\n\\t\\t] ')\n if index == n1 - 1:\n scheme.append('\\n.\\n\\n')\n else:\n scheme.append(';\\n\\t')\n\n # Scheme: Dataset\n\n scheme.append('' + data_namespace_prefix + ':' + _cleanString(dataset_label) +\n '_dataset a qb:DataSet ;\\n\\tqb:structure ' + data_namespace_prefix + ':' +\n _cleanString(dataset_label) + '_dsd ;\\n\\trdfs:label \"' + \\\n dataset_label + '\" ; \\n\\tprov:generatedAtTime \"' + dataset_updated\n + '\"^^xsd:dateTime ;\\n\\tdc:creator \"' + 
dataset_source + '\" .\\n\\n')\n\n # Generating Code list\n\n # Code list: Conceptscheme\n\n for data_field_nm in field_nms:\n if data_field_nm != 'Units':\n code_list.append('' + data_namespace_prefix + 'schm:' +\n _cleanString(data_field_nm) + ' a skos:ConceptScheme ;\\n\\t')\n\n skos_members = []\n for k in dimensions[data_field_nm]['category']['index'].keys():\n concept = dimensions[data_field_nm]['category']['label'][k]\n # print(concept)\n\n skos_members.append(\n 'skos:member ' + _prefix_build_concept(data_namespace_prefix,\n data_field_nm) + _cleanString(\n concept) + ' ')\n\n code_list.append(';\\n\\t'.join(skos_members) + '.\\n\\n')\n\n # Code list: Concepts\n\n for data_field_nm in field_nms:\n if data_field_nm != 'Units':\n\n for k in dimensions[data_field_nm]['category']['index'].keys():\n concept = dimensions[data_field_nm]['category']['label'][k]\n code_list.append(\n '' + _prefix_build_concept(data_namespace_prefix, data_field_nm) + _cleanString(\n concept) +\n ' a skos:Concept ;\\n\\trdfs:label \"' + concept + '\" .\\n\\n')\n\n # Generating Observations\n\n all_term = []\n for data_field_nm in field_nms:\n if data_field_nm != 'Units':\n labels = []\n for k in dimensions[data_field_nm]['category']['index'].keys():\n concept = dimensions[data_field_nm]['category']['label'][k]\n labels.append(self._cleanString(concept))\n\n all_term.append(labels)\n\n size = source_json['size']\n del size[unit_index]\n total_size = 1\n tracker = []\n\n for s in size:\n tracker.append(0)\n total_size *= s\n\n track_size = len(tracker)\n\n # Observations: creating each\n\n for t in range(total_size):\n observations.append(data_namespace_prefix + ':' + str(\n uuid.uuid4()) + ' a qb:Observation ;\\n\\tqb:dataSet ' + data_namespace_prefix + ':' +\n _cleanString(dataset_label) + '_dataset ;\\n\\tqb:measureType ' +\n vocabulary_namespace_prefix + ':value ;\\n\\t')\n\n cnt_all = 0\n\n for data_field_nm in field_nms:\n\n if data_field_nm != 'Units':\n observations.append('' + vocabulary_namespace_prefix + ':'\n + self._cleanString(data_field_nm) + ' ')\n observations.append('' + self._prefix_build_concept(data_namespace_prefix, data_field_nm) +\n all_term[cnt_all][tracker[cnt_all]] + ' ;\\n\\t')\n cnt_all += 1\n\n tracker[track_size - 1] += 1\n\n for i in range(track_size - 1, -1, -1):\n if i != 0:\n if tracker[i] > size[i] - 1:\n tracker[i] = 0\n tracker[i - 1] += 1\n else:\n if tracker[i] > size[i] - 1:\n tracker[i] = 0\n\n observations.append('qb:measureType ' + vocabulary_namespace_prefix + ':value ;\\n\\t' +\n vocabulary_namespace_prefix + ':value \"' +\n str(dataset_values[t]) + '\"^^xsd:float\\n . \\n\\n')\n\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n logger.error(\"Errort status code - Mssg: {}\".format(str(e)))\n job_result['status'] = 500\n job_result['Error'] = str(e)\n job_result['version'] = \"New\"\n job_result['Message'] = \"Something went wrong in parsing the json-stat to RDF\"\n\n return job_result\n\n rdf_content = []\n rdf_content.extend(scheme)\n rdf_content.extend(code_list)\n rdf_content.extend(observations)\n job_result['status'] = 200\n job_result['Error'] = \"None\"\n job_result['version'] = \"New\"\n job_result['SuccessMessage'] = \"RDF file is successfully created\"\n job_result['rdf_content'] = \"\".join(rdf_content)\n\n return job_result\n\n # Check for the version of the json-stat file\n\n if \"version\" in source_json.keys():\n return conversion_for_new_jstat_version()\n else:\n return conversion_for_old_jstat_version()",
"def save_history(cube, field, filename): \n\n history.append(cube.attributes['history'])",
"def transform_relationships(self, record):\n\n holdings_type = record.get_holdings_type()\n\n relationships = []\n\n # Category Entry (Lane) (R)\n for field in record.get_fields('655'):\n if field.indicator1 in '12':\n rb = RelationshipBuilder()\n\n # Name/Type\n rel_name = \"Category\"\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree\n rb.set_degree({'1': 'primary',\n '2': 'secondary'}.get(field.indicator1))\n\n # Enumeration: n/a\n # Chronology: n/a\n\n # Target\n rb.set_target(self.build_ref_from_field(field, CONCEPT))\n\n # Notes: n/a\n\n relationships.append(rb.build())\n\n\n # Collection/Location/Call Number (R)\n for field in record.get_fields('852'):\n rb = RelationshipBuilder()\n\n # Name/Type\n if 'b' not in field:\n logger.warning(f\"{record.get_control_number()}: loc code ($b) not found: {field}\")\n continue\n loc_code = field['b'].strip(' .').upper()\n rel_name = self.location_code_to_relator_map.get(loc_code, \"Access\")\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree: n/a\n\n # Enumeration\n # if not digital holdings, h/i are enum on rel to Place, else ignore\n if holdings_type != LaneMARCRecord.DIGITAL:\n # just concat??\n enum = ' '.join(field.get_subfields('h','i')).strip()\n rb.set_enumeration(tfcm.build_simple_ref(enum, STRING) if enum else None)\n\n # Chronology: n/a\n\n # Target\n rb.set_target(self.build_ref_from_field(Field('651',' 7',['a',loc_code]), PLACE))\n\n # Notes\n # map ind 1\n # ...\n # ...\n # ...\n for code, val in field.get_subfields('x','z', with_codes=True):\n rb.add_note(val,\n role = \"annotation\" if code == 'x' else \"documentation\")\n\n relationships.append(rb.build())\n\n # Electronic Location And Access (R)\n for field in record.get_fields('856'):\n rb = RelationshipBuilder()\n\n # Name/Type\n rel_name = field['e'] if 'e' in field else \\\n (\"Access\" if field.indicator2 in '01' else \"Related\")\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree: n/a\n # Enumeration: n/a\n # Chronology: n/a\n\n # Notes\n for code, val in field.get_subfields('9','i','r','x', with_codes=True):\n if code == 'x':\n val = \"Date verified: \" + val\n rb.add_note(val,\n role = \"annotation\" if code in 'irx' else \"documentation\")\n\n # Target\n wrb = WorkRefBuilder()\n\n # field should only have one y or z, but do all just in case.\n link_name = ' '.join(field.get_subfields('y','z')).strip()\n if not link_name:\n link_name = 'Link'\n wrb.add_name(link_name)\n wrb.set_link(link_name,\n href_URI = field['u'] )\n\n for val in field.get_subfields('q'):\n # take a guess at the qualifier type\n qualifier_type = Indexer.simple_element_type_from_value(val)\n if qualifier_type is None:\n qualifier_type = STRING\n wrb.add_qualifier(tfcm.build_simple_ref(val, qualifier_type))\n\n rb.set_target(wrb.build())\n\n relationships.append(rb.build())\n\n # Uniform Title Associated with Version (Lane) (R)\n for field in record.get_fields('963'):\n rb = RelationshipBuilder()\n\n # Name/Type\n rel_name = \"Related uniform title\"\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree: n/a\n # Enumeration: n/a\n\n # Chronology:\n for val in field.get_subfields('d','f'):\n rb.set_time_or_duration_ref(DateTimeParser.parse_as_ref(val))\n field.delete_all_subfields('d')\n field.delete_all_subfields('f')\n\n # Notes: n/a\n\n # these often link to work insts instead of auts, but\n # should be PARSED most similarly to e.g. 
bib 730 (aut)\n rb.set_target(self.build_ref_from_field(field, WORK_AUT))\n\n relationships.append(rb.build())\n\n return relationships",
"def RDFAnnotationParser_parseModelHistory(*args):\n return _libsbml.RDFAnnotationParser_parseModelHistory(*args)",
"def to_model(cls, obj):\n\n new_model = cls()\n\n for key, value in obj.iteritems():\n if value:\n if key == 'transcripts':\n setattr(new_model, key, [ModelConverter.to_model(Transcript, t) for t in value])\n elif key == 'acts' and cls == Transcript:\n setattr(new_model, key, [ModelConverter.to_model(Act, a) for a in value])\n elif key == 'subtitles':\n setattr(new_model, key, [ModelConverter.to_model(Subtitle, s) for s in value])\n else:\n setattr(new_model, key, value)\n\n return new_model",
"def _from_db_object(boar, db_boar):\n foreign_key = ['category', 'dormitory', 'source']\n for field in boar.fields:\n if field in ['birthday', 'entryday']:\n boar[field] = db_boar[field].strftime(\"%Y-%m-%d\")\n elif field not in foreign_key:\n boar[field] = db_boar[field]\n elif field == 'category' and db_boar.category:\n boar[field] = db_boar.category.name\n elif field == 'dormitory' and db_boar.dormitory:\n boar[field] = db_boar.dormitory.name\n elif field == 'source' and db_boar.source:\n boar[field] = db_boar.source.name\n boar.obj_reset_changes()\n return boar",
"def register_props():\n props_obj = HistoryProps()\n\n bpy.types.Scene.batchapps_history = \\\n bpy.props.PointerProperty(type=HistoryDisplayProps)\n props_obj.display = bpy.context.scene.batchapps_history\n\n return props_obj",
"def test_convert(self):\n gd: GraphDocument = json_loader.load(str(ONT), target_class=GraphDocument)\n g = self.converter.convert(gd)\n g.serialize(OUT)\n oi = SparqlImplementation(OntologyResource(OUT))\n # for r in oi.relationships([\"GO:0005773\"]):\n # print(r)\n self.compliance_tester.test_synonyms(oi)\n self.compliance_tester.test_definitions(oi)\n self.compliance_tester.test_sssom_mappings(oi)\n self.compliance_tester.test_relationships(oi)",
"def parse(cls, history_content: dict):\n sections = cls._parse_sections(history_content)\n return cls(**sections)"
] | [
"0.5449591",
"0.53597903",
"0.53180766",
"0.5217686",
"0.5002477",
"0.48273036",
"0.48258838",
"0.47788423",
"0.47186252",
"0.4703909",
"0.46876624",
"0.46831584",
"0.46785995",
"0.46753672",
"0.46651852",
"0.46631747",
"0.46261874",
"0.45973223",
"0.45775396",
"0.455817",
"0.4549144",
"0.4546078",
"0.45185664",
"0.45057794",
"0.4482329",
"0.44761902",
"0.44601098",
"0.44558772",
"0.44534972",
"0.4450709"
] | 0.56445116 | 0 |
Run the EM algorithm. | def run_em(self, maxiter=400, tol=1e-4, verbose=True, regularization=0.0):
self.means = self.means.T
L = None
        for i in range(maxiter):
newL = self._expectation()
if i == 0 and verbose:
print("Initial NLL =", -newL)
self._maximization(regularization)
if L is None:
L = newL
else:
dL = np.abs((newL - L) / L)
if i > 5 and dL < tol:
break
L = newL
if i < maxiter - 1:
if verbose:
print("EM converged after {0} iterations".format(i))
print("Final NLL = {0}".format(-newL))
else:
print("Warning: EM didn't converge after {0} iterations"
.format(i))
self.means = self.means.T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_em(self, r):\n self.tc.reset()\n self.pf.Y = r.T\n em_data = {}\n if self.print_mode:\n print 'The hessian trace is {}'.format(\n np.trace(self.tc.t_H.get_value()))\n\n print 'Running full EM'\n\n for u in range(self.n_itr):\n t0 = self.n_t * u / self.n_itr\n tf = self.n_t * (u + 1) / self.n_itr\n print('Iteration: {} | Running up to time {}'.format(u, tf))\n\n self.run_e(tf)\n self.run_m(t0, tf, r, n_g_itr=self.n_g_itr)\n\n iteration_data = {\n 'time_steps': tf,\n 'path_means': self.pf.means,\n 'path_sdevs': self.pf.sdevs,\n 'image_est': self.tc.image_est(),\n 'coeff_est': self.tc.get_A()\n }\n\n if self.save_pix_rf_coupling:\n xr = self.pf.XS[t0:tf, :, 0].transpose()\n yr = self.pf.XS[t0:tf, :, 1].transpose()\n w = self.pf.WS[t0:tf].transpose()\n tmp = self.tc.get_sp_rf_coupling(xr, yr, w)\n iteration_data['pix_rf_coupling'] = tmp\n\n if self.save_hessian:\n iteration_data['hessian'] = self.tc.t_H.get_value()\n\n\n em_data[u] = iteration_data\n em_data['mode'] = 'EM'\n\n if self.save_mode:\n self.data['EM_data'] = em_data",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._estimate_edens_()\n self._compute_()\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n if self.verbose: print(\"\\n Processing Doppler.\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def _run(self):\n self._algorithm(self._list, self)",
"def run(self):\n self.membershipFunction()\n self.interpretingMF()\n self.rules()\n self.standardComposition_Min()\n self.standardComposition_Max()\n self.defuzzification()",
"def run(self):\n self.log.overall('Starting run')\n run_start = time()\n for epoch in xrange(self.n_epochs):\n self.agent.reset()\n self.n_epoch = epoch\n self._run_epoch()\n self.log.overall('End of run ({:.2f} s)'.format(time() - run_start))",
"def emb_experiment():\n print(\"EMBEDDINGS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'emb_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'use_word_emb'\n changing_param_value = [0, 1]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)",
"def run(self):\n import time\n\n\n for msr in self.msrs:\n # find state transition matrix\n phi_p, state_prop = self._compute_stm(msr.time)\n\n # use stm to propagate perturbation and covariance\n pert_m = np.matmul(phi_p, self.pert_vec[-1])\n cov_m = np.matmul(phi_p, np.matmul(self.cov_list[-1],\n np.transpose(phi_p)))\n\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, state_prop)\n\n # calculate kalman gain\n k_gain = self._calc_k_gain(cov_m, h_tilde, msr.cov)\n\n # measurement update\n cov_p, pert_p = self._measurement_update(y_i,\n h_tilde,\n pert_m,\n k_gain,\n cov_m)\n\n # update the state lists\n self.residuals.append(y_i)\n self.prop_state_list.append(state_prop)\n self.estimates.append(np.add(state_prop, np.transpose(pert_p))[0])\n self.cov_list.append(cov_p)\n self.pert_vec.append(pert_p)\n self.times.append(msr.time)",
"def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records",
"def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def run(self):\n evaluateAllRuns = False\n while True:\n if self.host == \"\":\n # respond to clients\n self.respond2Clients()\n else:\n print(\"Next...\")\n # randomly choose experiment + run\n if not evaluateAllRuns:\n print(\"Randomly fetching run\")\n self.exp, self.runnum, self.detname = randExpRunDet()\n else:\n\t\t try:\n print(\"Fecthing next run in experiment\")\n self.exp, self.runnum, self.detname = nextExpRunDet(self.goodExp, self.runList[0])\n if self.exp is None:\n self.runList.pop(0)\n continue\n except:\n evaluateAllRuns = False\n continue\n if not self.checkStatus(self.exp, self.runnum, self.detname):\n print \"trying: exp %s, run %s, det %s\"%(self.exp,self.runnum,self.detname)\n try: #temp\n self.ds = safeDataSource(self.exp, self.runnum)\n except: #temp\n continue #temp\n self.run = self.ds.runs().next()\n self.times = self.run.times()\n #Start temp code\n if self.detname is None:\n continue\n #End temp code\n self.det = psana.Detector(self.detname)\n self.det.do_reshape_2d_to_3d(flag=True)\n try:\n self.iX = np.array(self.det.indexes_x(self.run), dtype=np.int64)\n self.iY = np.array(self.det.indexes_y(self.run), dtype=np.int64)\n self.ipx, self.ipy = self.det.point_indexes(self.run, pxy_um=(0, 0))\n self.alg = PyAlgos()\n self.alg.set_peak_selection_pars(npix_min=2, npix_max=30, amax_thr=300, atot_thr=600, son_min=10)\n mask = self.det.mask(self.runnum, calib=True, status=True, edges=True, central=True, unbond=True, unbondnbrs=True)\n\n samples = np.linspace(0, len(self.times), num=100, endpoint=False, retstep=False, dtype='int')\n offset = np.floor(np.random.uniform(0, len(self.times)-samples[-1])).astype('int')\n mysamples = samples + offset\n numCrystals = 0\n for self.eventNum in mysamples:\n self.evt = self.run.event(self.times[self.eventNum])\n calib = self.det.calib(self.evt)\n if calib is not None:\n peaks = self.alg.peak_finder_v3r3(calib, rank=3, r0=3, dr=2, nsigm=10, mask=mask.astype(np.uint16))\n if self.likelihood(peaks) >= self.goodLikelihood:\n numCrystals += 1\n if numCrystals >= self.minCrystals:\n self.numSaved +=1\n self.updateStatus(self.exp, self.runnum, self.detname, self.numSaved)\n self.lastGood = True\n break\n except:\n print \"Could not analyse this run\"\n #If an experiment has not had all of its runs evaluated yet\n # and if the last randomly selected run in this experiment was good\n # then all the runs in this experiment should be evaluated\n if (self.exp not in self.goodList) and self.lastGood:\n self.goodExp = self.exp #Save the name of this experiment\n self.goodRun = self.runnum #Save the run that has already been evaluated\n self.lastGood = False #Reset the condition that the last run was \"good\"\n self.goodList.append(self.goodExp) #Add this experiment name to the list of experiments that have had all runs evaluated\n self.runList = returnRunList(self.goodExp, self.goodRun) #save list of all runs in this good exp\n evaluateAllRuns = True #rerun loop with new algorithm that evaluates each run in an experiment\n continue\n if evaluateAllRuns: #If the loop is currently evaluating all of the runs in an experiment\n if(len(self.runList) > 1):\n self.runList.pop(0) #Remove runs from the list of runs each time they are evaluated\n else:\n self.runList.pop(0)#Remove runs until the list is completely empty\n evaluateAllRuns = False #Stop evaluated all the runs of an experiment, go back to random fetching",
"def main():\n bee_model = md.BeeForagingModel(GRID_WIDTH, GRID_HEIGHT, 10, 30, 7)\n\n iteration_size = 1000\n\n for i in range(45):\n print(f'ITERATION {i*iteration_size}')\n\n print({k: len(v) for k, v in bee_model.grid.grids.items()})\n start_time = time.time()\n bee_model.run_model(iteration_size)\n print(time.time() - start_time)",
"def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!",
"def run(self):\n self.run()",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')",
"def run(self):\n for msr in self.msrs:\n # find state transition matrix\n phi_p, state_prop = self._compute_stm(msr.time)\n\n # use stm to propagate perturbation and covariance\n cov_m = np.matmul(phi_p, np.matmul(self.cov_list[-1],\n np.transpose(phi_p)))\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, state_prop)\n\n # calculate kalman gain\n k_gain = self._calc_k_gain(cov_m, h_tilde, msr.cov)\n\n # measurement update\n cov_p, state_est = self._measurement_update(y_i,\n h_tilde,\n k_gain,\n cov_m,\n state_prop)\n\n # update the state lists\n self.residuals.append(y_i)\n self.prop_state_list.append(state_est)\n self.estimates.append(state_est)\n self.cov_list.append(cov_p)\n self.times.append(msr.time)",
"def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)",
"def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation",
"def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)",
"def _run(self):\n emulators = self.create_emulators()\n try:\n while True:\n command = self.queue.get()\n if command == self.Command.NEXT:\n for i, (emulator, action) in enumerate(zip(emulators, self.action)):\n new_s, reward, is_done, info = emulator.next(action)\n if is_done:\n self.state[i], info = emulator.reset()\n else: #so we never return terminal states\n self.state[i] = new_s\n self.reward[i] = reward\n self.is_done[i] = is_done\n for k in self.info:\n self.info[k][i] = info[k]\n self.barrier.put(True)\n elif command == self.Command.RESET:\n\n for i, emulator in enumerate(emulators):\n self.state[i], info = emulator.reset()\n for k in self.info:\n self.info[k][i] = info[k]\n self.barrier.put(True)\n elif command == self.Command.CLOSE:\n break\n else:\n raise WorkerError(\"{} has received unknown command {}\".format(type(self),command))\n finally:\n for emulator in emulators: emulator.close()\n logging.debug('WorkerProcess#{} finished!'.format(self.id+1))",
"def main():\n\n print(\"\\nBeginning simulation: current date and time {}\\n\".format(datetime.datetime.now()))\n\n # Initialise the particles, potential and energy array\n particles = np.random.rand(n_particles, 3) * L\n lj_pot = np.zeros((n_particles, n_particles))\n energy = np.zeros(n_steps + 1)\n\n # Calculate the initial energies and then do the MCMC iterations and *hopefully* converge\n particles, lj_pot, energy = initial_energy(particles, lj_pot, energy)\n particles, lj_pot, energy = mcmc(particles, lj_pot, energy)\n pressure = compute_pressure(particles)\n\n return particles, lj_pot, energy, pressure",
"def run(self):\n self.cmdloop()",
"def _run(self, **params):\n# if softEvidence is None:\n# self.softEvidence = self.mln.softEvidence\n# else:\n# self.softEvidence = softEvidence\n # initialize chains\n chains = MCMCInference.ChainGroup(self)\n for i in range(self.chains):\n chain = GibbsSampler.Chain(self, self.queries)\n chains.chain(chain)\n# if self.softEvidence is not None:\n# chain.setSoftEvidence(self.softEvidence)\n # do Gibbs sampling\n# if verbose and details: print \"sampling...\"\n converged = 0\n steps = 0\n if self.verbose:\n bar = ProgressBar(color='green', steps=self.maxsteps)\n while converged != self.chains and steps < self.maxsteps:\n converged = 0\n steps += 1\n print('STEP {} / {}'.format(steps, self.maxsteps))\n for chain in chains.chains:\n chain.step()\n if self.verbose:\n bar.inc()\n bar.label('%d / %d' % (steps, self.maxsteps))\n# if self.useConvergenceTest:\n# if chain.converged and numSteps >= minSteps:\n# converged += 1\n# if verbose and details:\n# if numSteps % infoInterval == 0:\n# print \"step %d (fraction converged: %.2f)\" % (numSteps, float(converged) / numChains)\n# if numSteps % resultsInterval == 0:\n# chainGroup.getResults()\n# chainGroup.printResults(shortOutput=True)\n # get the results\n return chains.results()[0]",
"def test_ML_check_cms_aem_emvevex(self):\n\n self.setup_logFile_for_logger('madgraph.check_cmd')\n files = ['acceptance_test_aem_emvevex.pkl',\n 'acceptance_test_aem_emvevex.log',\n 'acceptance_test_aem_emvevex_widths_increased.pkl',\n 'acceptance_test_aem_emvevex_widths_increased.log']\n output_name = 'SAVEDTMP_CHECK_acceptance_test_aem_emvevex__%s__'\n \n try:\n cwd = os.getcwd()\n \n # Change this when we will make the CMS-ready EW model the default\n self.do('import model loop_qcd_qed_sm')\n for mode in ['NWA','CMS']:\n if path.isdir(pjoin(MG5DIR,output_name%mode)):\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n \n # Make sure it works for an initial run\n command = 'check cms -reuse a e- > e- ve ve~ [virt=QCD QED] '\n options = {'name':'acceptance_test_aem_emvevex',\n 'lambdaCMS':'(1.0e-6,2)',\n 'show_plot':'False',\n 'seed':'666',\n 'resonances':'2',\n 'recompute_width':'first_time',\n 'report':'full'}\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running first CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isdir(pjoin(MG5DIR,output_name%mode)))\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.log')))\n res = open(pjoin(MG5DIR,'acceptance_test_aem_emvevex.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n \n # Now for a Reuse-run with the widths modified by 1%\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n self.setup_logFile_for_logger('madgraph.check_cmd')\n # Now copy the card with recomputed widths in it\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isfile(pjoin(MG5DIR,output_name%mode,\n 'Cards','param_card.dat_recomputed_widths')))\n shutil.copy(pjoin(MG5DIR,output_name%mode,'Cards',\n 'param_card.dat_recomputed_widths'),\n pjoin(MG5DIR,output_name%mode,'Cards','param_card.dat'))\n options['tweak']='allwidths->1.1*allwidths(widths_increased)'\n options['recompute_width']='never'\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running second CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')))\n res = open(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n \n # Clean up duties\n for mode in ['NWA','CMS']:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n for file in files:\n try:\n os.remove(pjoin(MG5DIR,file))\n except:\n pass\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n\n except KeyError as e:\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n for mode in ['NWA','CMS']:\n 
try:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n except:\n pass\n for f in files:\n try:\n os.remove(pjoin(MG5DIR,f))\n except:\n pass\n raise e\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)",
"def algorithm_loop(self):",
"def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)",
"def run(self):\n for worker in self.simulation_workers:\n worker.start()",
"def main():\n my_emr = EmrProcessing()\n\n if \"-s\" in sys.argv:\n my_emr.verbose_mode = False\n else:\n my_emr.verbose_mode = True\n print \"\\nStarting Titanic Data Analysis\"\n my_emr.parse_user_selections()\n\n # Setup\n my_emr.clear_local_output_directory()\n my_emr.update_mapper_file(\"model2\")\n\n # S3 activities\n my_emr.empty_bucket()\n my_emr.create_and_fill_bucket()\n\n # EMR activities\n my_emr.setup_and_run_job()\n my_emr.wait_until_job_completes()\n\n # Cleanup\n my_emr.download_output_files()\n my_emr.post_process_output_file()\n if my_emr.verbose_mode:\n my_emr.print_local_output_files_stats()",
"def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True",
"def run_algorithm(self):\n population_size = self.population_size\n simulator = self.simulator\n num_generations = self.num_generations\n current_dir = os.getcwd()\n urdf = current_dir + os.sep + os.path.join(\"URDF\", \"Ghost\", \"urdf\", \"Ghost.urdf\")\n simulated_robot = Robot(urdf, (0, 0, 0.4))\n simulated_robot.set_id(simulator.load_new_robot_urdf(simulated_robot))\n # make placeholders\n counter = 0\n best_genome = None\n best_fit = 0\n evals = population_size * (num_generations + 1)\n beam_fit = np.zeros(evals)\n current_population = self.make_population()\n current_population_fitness = [0] * self.population_size\n # print(\"build robots\")\n for k in range(self.population_size):\n #\tprint(\"initial robot \" , k)\n robot = current_population[k]\n simulator.load_robot_parameters(robot.parameters, 0)\n robot.set_fitness(simulator.compute_walk_fitness(1000)[0]) # evaluate the robot's fitness\n fitness = robot.get_fitness()\n current_population_fitness[k] = fitness\n \n if counter == 0:\n beam_fit[counter] = current_population_fitness[k] \n else:\n \n if beam_fit[counter - 1] < current_population_fitness[k]: # if the best overall robot thus far\n best_genome = robot.genome.copy() # update the best robot's genome\n beam_fit[counter] = current_population_fitness[k] \n else:\n beam_fit[counter] = beam_fit[counter - 1]\n best_fit = beam_fit[counter]\n\n counter +=1 \n\n\n #\tprint(\"origional robots evaluated, their fitness is \" , )\n for i in range(num_generations): # perform mutations equal to num_Climb\n #\t\t\tprint(\"start of gen , current population_fitness\" , current_population_fitness)\n population = current_population.copy()\n population_fitness = current_population_fitness.copy()\n print('gen' , i)\n for j in range(self.population_size):\n robot = population[j]\n mut_loc, old_val = robot.mutate_genome() # Mutation: Keep track of mut location and previous vals\n simulator.load_robot_parameters(robot.parameters, 0)\n robot.set_fitness(simulator.compute_walk_fitness(1000)[0]) # evaluate the robot's fitness\n fit_new = robot.get_fitness()\n population_fitness[j] = fit_new\n # BIG POINT - here we keep regardless if the change is better or not\n if fit_new > best_fit: # update learning curve\n best_fit = fit_new\n best_genome = robot.genome.copy()\n beam_fit[counter] = best_fit\n counter += 1\n #\t\t\tprint(\" ... \")\n #\t\t\tprint(\"end of gen , current population_fitness\" , current_population_fitness)\n # concat the populations and population fitnesses\n total_population = current_population + population\n total_population_fitness = current_population_fitness + population_fitness\n # print(\"before quick sort \" , total_population_fitness)\n # print(\" ... \")\n # sort the lists\n self.quick_sort(total_population_fitness, total_population, 0, len(total_population) - 1)\n # print(\" after quick sort \" , total_population_fitness)\n # print(\" ... \")\n # keep the top half\n current_population = total_population[:self.population_size]\n current_population_fitness = total_population_fitness[:self.population_size]\n # print(\"keep \", current_population_fitness)\n #\t\tprint(counter)\n\n if not os.path.exists('./data'):\n os.mkdir('./data')\n\n np.savetxt(\"beam_genome_gen_999_pop_100.csv\", best_genome, delimiter=\",\")\n np.savetxt(\"beam_learning_gen_999_pop_100.csv\", beam_fit, delimiter=\",\")"
] | [
"0.72161376",
"0.628836",
"0.6144694",
"0.61367214",
"0.6123989",
"0.60628617",
"0.60402554",
"0.6032735",
"0.60259813",
"0.60199857",
"0.59711766",
"0.5922304",
"0.59206814",
"0.5912053",
"0.5896723",
"0.5892612",
"0.58919305",
"0.58890396",
"0.58829486",
"0.58683765",
"0.5826373",
"0.5812362",
"0.58047706",
"0.5783567",
"0.5758246",
"0.57562816",
"0.57480556",
"0.57415825",
"0.57341015",
"0.57305574"
] | 0.6405142 | 1 |
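
Editor's note: for readers skimming the run_em row above, the sketch below shows the same EM pattern (E-step, M-step, stop on relative change of the log-likelihood) as a self-contained toy. The em_two_gaussians function, its unit-variance two-component model, and the sample data are illustrative assumptions, not part of the dataset row.

import numpy as np

def em_two_gaussians(x, maxiter=400, tol=1e-4):
    # Tiny EM for a two-component, unit-variance 1-D Gaussian mixture:
    # alternate an E-step (responsibilities) and an M-step (weights, means)
    # until the objective stops improving, mirroring the loop in run_em.
    x = np.asarray(x, dtype=float)
    mu = np.array([x.min(), x.max()])      # crude but deterministic start
    pi = np.array([0.5, 0.5])
    prev_ll = None
    for i in range(maxiter):
        # E-step: per-point responsibilities under each component.
        log_p = -0.5 * (x[:, None] - mu[None, :]) ** 2 + np.log(pi)
        log_norm = np.logaddexp(log_p[:, 0], log_p[:, 1])
        resp = np.exp(log_p - log_norm[:, None])
        ll = log_norm.sum()
        # M-step: closed-form updates for the mixing weights and means.
        nk = resp.sum(axis=0)
        pi = nk / x.size
        mu = (resp * x[:, None]).sum(axis=0) / nk
        # Stopping rule analogous to run_em: relative change in the objective.
        if prev_ll is not None and abs(ll - prev_ll) < tol * (abs(prev_ll) + 1e-12):
            break
        prev_ll = ll
    return mu, pi, ll

# Example call (hypothetical data): two well-separated clusters should yield
# means near -2 and +2.
# samples = np.concatenate([np.random.randn(200) - 2, np.random.randn(200) + 2])
# em_two_gaussians(samples)
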
The decorator method to be called on the class object. This method will set the proper `discoverable` type to the class. It should return the class passed in, according to the decorator spec. | def discoverable(_class):
# Set the attribute to the class name, to prevent subclasses from also
# being discoverable.
setattr(_class, _get_discoverable_attribute(_class), True)
return _class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_class_discoverable(_class, default_discoverability=False):\n return bool(getattr(_class, _get_discoverable_attribute(_class),\n default_discoverability))",
"def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class",
"def __class__(self, ???):",
"def checktype(type):\n def decorator(klass):\n register_type(type, klass)\n return klass\n\n return decorator",
"def _get_discoverable_attribute(_class):\n return \"__{}_is_discoverable\".format(_class.__name__)",
"def delegated(cls):\n return cls",
"def jit_class(cls):\n from mindspore import nn\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator jit_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, nn.Cell):\n raise TypeError(f\"Decorator jit_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n setattr(cls, '__ms_class__', True)\n return cls",
"def identify_class(self, cls):",
"def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f",
"def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)",
"def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])",
"def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]",
"def decorate_class(cls, klass: type, decorate_subclasses=False, **setting_kwds) -> None:\n assert isinstance(klass, type) # in \"debug\" mode only\n if not isinstance(klass, type): # in either mode, have the same awareness at the same time\n return\n\n # Filter out builtins.\n if not get_file_of_object(klass):\n return\n\n def _deco_class(kls: type):\n t = cls(**setting_kwds)\n _ = t(kls)\n # assert _ == kls\n\n def _deco_class_rec(kls: type):\n _deco_class(kls)\n for subclass in kls.__subclasses__():\n _deco_class_rec(subclass)\n\n if decorate_subclasses:\n _deco_class_rec(klass)\n else:\n _deco_class(klass)\n # (_deco_class_rec if decorate_subclasses else _deco_class)(klass)",
"def resolver(cls) -> Callable:\n annotations = {}\n for subclass in cls.subclasses():\n name = subclass.__name__.split(cls.__name__)[0].lower() # type: ignore\n argument = strawberry.argument(description=subclass._type_definition.description)\n annotations[name] = Annotated[List[subclass], argument] # type: ignore\n defaults = dict.fromkeys(annotations, []) # type: dict\n return functools.partial(resolve_annotations, annotations=annotations, defaults=defaults)",
"def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator",
"def type(\n cls: Type = None,\n *,\n name: str = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: str = None,\n federation: Optional[FederationTypeParams] = None,\n):\n\n def wrap(cls):\n wrapped = _wrap_dataclass(cls)\n\n return _process_type(\n wrapped,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n federation=federation,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)",
"def find_class(self, class_name: str) -> Type:\n pass",
"def get_cls(cls, kind: str) -> t.Callable:\n\n kind_cls = cls.REGISTRY.get(kind)\n if not kind_cls:\n raise UndefinedResource(kind)\n return kind_cls",
"def _resolve_moderator(cls):\n if hasattr(cls, 'Moderator') and inspect.isclass(cls.Moderator):\n Moderator = cls.Moderator\n # in python3 __dict__ is dictproxy\n attrs = dict(Moderator.__dict__)\n attrs = clear_builtins(attrs)\n\n return type(\n '%sModerator' % cls.__name__,\n (GenericModerator,),\n attrs,\n )\n else:\n return None",
"def class_based_view_decorator(decorator):\n def _dec(cls):\n assert (isinstance(cls, type) and issubclass(cls, View)), (\n \"Only subclasses of django.views.generic.View may use this decorator.\"\n )\n _method_decorator = method_decorator(decorator)\n cls.dispatch = _method_decorator(cls.dispatch)\n return cls\n\n update_wrapper(_dec, decorator, assigned=available_attrs(decorator))\n return _dec",
"def _metatize_type(obj_type):\n for meta_type in MetaSymbol.__subclasses__():\n obj_cls = _find_meta_type(obj_type, meta_type)\n\n if obj_cls is not None:\n return obj_cls",
"def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)",
"def resolver(cls) -> Callable:\n annotations = dict(cls.__annotations__)\n annotations.pop('apply', None)\n defaults = {name: getattr(cls, name) for name in annotations}\n return functools.partial(resolve_annotations, annotations=annotations, defaults=defaults)",
"def type(cls):",
"def func(self):\n return self.__class__",
"def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass",
"def classproperty(func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n\n return ClassPropertyDescriptor(func)",
"def classproperty(func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n\n return ClassPropertyDescriptor(func)",
"def good_classmethod_decorator(decorator): \n def new_decorator(cls, f):\n g = decorator(cls, f)\n g.__name__ = f.__name__\n g.__doc__ = f.__doc__\n g.__dict__.update(f.__dict__)\n return g\n \n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n\n return new_decorator",
"def annotations_class(cls):\n assert(isclass(cls))\n # To play it safe we avoid to modify the dict while iterating over it,\n # so we previously cache keys.\n # For this we don't use keys() because of Python 3.\n # Todo: Better use inspect.getmembers here\n keys = [key for key in cls.__dict__]\n for key in keys:\n memb = cls.__dict__[key]\n if _check_as_func(memb):\n annotations_func(memb)\n elif isclass(memb):\n annotations_class(memb)\n return cls"
] | [
"0.6100534",
"0.5945838",
"0.5689444",
"0.5677215",
"0.56717265",
"0.55983895",
"0.55354106",
"0.5534171",
"0.55314434",
"0.5498857",
"0.54959726",
"0.5491333",
"0.54092",
"0.54038715",
"0.5341593",
"0.53414214",
"0.5279805",
"0.52688867",
"0.52337843",
"0.52116585",
"0.5203248",
"0.5201972",
"0.51879007",
"0.5173607",
"0.51669616",
"0.50658184",
"0.50613123",
"0.50613123",
"0.50506204",
"0.5048357"
] | 0.70861286 | 0 |
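
Editor's note: an illustrative usage sketch for the discoverable decorator in the row above. The _get_discoverable_attribute helper is the one that appears among the candidate snippets; the CsvLoader class name is invented for the example and is not part of the dataset.

def _get_discoverable_attribute(_class):
    return "__{}_is_discoverable".format(_class.__name__)

def discoverable(_class):
    # The flag name embeds the class name, so subclasses are not
    # automatically discoverable just because their parent is.
    setattr(_class, _get_discoverable_attribute(_class), True)
    return _class

@discoverable
class CsvLoader:            # hypothetical plugin class
    pass

# The class-specific attribute confirms the mark:
# getattr(CsvLoader, "__CsvLoader_is_discoverable")  -> True
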
The decorator method to be called on the class object. This method will set the proper `not discoverable` type to the class. It should return the class passed in, according to the decorator spec. | def not_discoverable(_class):
# Set the attribute to the class name, to prevent subclasses from also
# being not discoverable.
setattr(_class, _get_discoverable_attribute(_class), False)
return _class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class",
"def checktype(type):\n def decorator(klass):\n register_type(type, klass)\n return klass\n\n return decorator",
"def _nonetypeclass(*args, **kwargs):\n return None",
"def jit_class(cls):\n from mindspore import nn\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator jit_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, nn.Cell):\n raise TypeError(f\"Decorator jit_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n setattr(cls, '__ms_class__', True)\n return cls",
"def ifc_fallback_class(cls):\n\n if \"*\" in classes:\n raise ImportError(\"Already registered {oc} as fallback, cannot register {nc}\".format(\n oc=classes[\"*\"].__name__,\n nc=cls.__name__))\n classes[\"*\"] = cls\n return cls",
"def opaque_class(self, classobj):\n self.restrict_class(classobj, None)",
"def __class__(self, ???):",
"def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)",
"def _typechecked_class(cls):\n for name, func in cls.__dict__.items():\n if not name.startswith('__'):\n setattr(cls, name, _typechecked_func(func))\n return cls",
"def Unprotected():\n def wrapper(original_class):\n orig_init = original_class.__init__\n\n @functools.wraps(original_class)\n def __init__(self, *args, **kws):\n self.falcon_security__roles = []\n self.falcon_security__unprotected = True\n orig_init(self, *args, **kws)\n\n original_class.__init__ = __init__\n return original_class\n return wrapper",
"def Typed(excepted_type, cls=None):\r\n if cls is None:\r\n return lambda cls: Typed(excepted_type, cls)\r\n super_set = cls.__set__\r\n\r\n def __set__(self, instance, value):\r\n if not isinstance(value, excepted_type):\r\n raise TypeError('expected' + str(excepted_type))\r\n super_set(self, instance, value)\r\n\r\n cls.__set__ = __set__\r\n\r\n return cls",
"def _resolve_moderator(cls):\n if hasattr(cls, 'Moderator') and inspect.isclass(cls.Moderator):\n Moderator = cls.Moderator\n # in python3 __dict__ is dictproxy\n attrs = dict(Moderator.__dict__)\n attrs = clear_builtins(attrs)\n\n return type(\n '%sModerator' % cls.__name__,\n (GenericModerator,),\n attrs,\n )\n else:\n return None",
"def delegated(cls):\n return cls",
"def secure_class(cls): # type: ignore\n return cls",
"def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]",
"def ms_class(cls):\n\n logger.warning(\"'mindspore.ms_class' will be deprecated and removed in a future version. \"\n \"Please use 'mindspore.jit_class' instead.\")\n\n # Check if cls is of type class.\n if not inspect.isclass(cls):\n raise TypeError(f'Decorator ms_class can only be used for class type, but got {cls}.')\n # Check if cls is nn.Cell.\n if issubclass(cls, ms.nn.Cell):\n raise TypeError(f\"Decorator ms_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.\")\n logger.info(f'Found ms_class: {cls}.')\n setattr(cls, '__ms_class__', True)\n return cls",
"def is_class_discoverable(_class, default_discoverability=False):\n return bool(getattr(_class, _get_discoverable_attribute(_class),\n default_discoverability))",
"def _get_discoverable_attribute(_class):\n return \"__{}_is_discoverable\".format(_class.__name__)",
"def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f",
"def nodeclass(cls):\n\n init = cls.__init__\n\n def init_wrapper(self, *args, **kwargs):\n if not hasattr(self, \"_init_run_for_class\"):\n self._init_run_for_class = set()\n if cls not in self._init_run_for_class:\n init(self, *args, **kwargs)\n self._init_run_for_class.add(cls)\n\n cls.__init__ = init_wrapper\n\n # Mark this class as decorated.\n del cls._node_decorator_missing_flag\n\n return cls",
"def identify_class(self, cls):",
"def under_review():\n\n def decorator(cls_or_callable: Union[Callable, Type], feature_name: Optional[str]=None, was_class: bool=False):\n if feature_name is None:\n feature_name = cls_or_callable.__qualname__\n message = f'The feature {feature_name} is currently marked under review.'\n filterwarnings('once', message, UnderReviewWarning)\n if inspect.isclass(cls_or_callable):\n cls_or_callable.__init__ = decorator(cls_or_callable.__init__, feature_name=cls_or_callable.__qualname__, was_class=True)\n cls_or_callable.__doc__ = _create_docstring_message(cls_or_callable.__doc__, message)\n return cls_or_callable\n\n @functools.wraps(cls_or_callable)\n def wrapper(*args, **kwargs):\n _raise_review_warning(message)\n return cls_or_callable(*args, **kwargs)\n if not was_class:\n wrapper.__doc__ = _create_docstring_message(cls_or_callable.__doc__, message)\n return wrapper\n return decorator",
"def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])",
"def class_based_view_decorator(decorator):\n def _dec(cls):\n assert (isinstance(cls, type) and issubclass(cls, View)), (\n \"Only subclasses of django.views.generic.View may use this decorator.\"\n )\n _method_decorator = method_decorator(decorator)\n cls.dispatch = _method_decorator(cls.dispatch)\n return cls\n\n update_wrapper(_dec, decorator, assigned=available_attrs(decorator))\n return _dec",
"def func(self):\n return self.__class__",
"def request_class(self):\n raise NotImplementedError()",
"def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator",
"def format_class(cls, **kwargs): \n _doc_formatter = cls._format_obj(**kwargs) \n try:\n assert USE_WRAPT_OR_NOT and wrapt\n warnings.warn('wrapt based class decorator not implemented')\n except:\n pass\n finally:\n def _class_decorator(_cls):\n try: \n meta_cls = _cls.__metaclass__\n except:\n meta_cls = type\n class metaclass_decorator(meta_cls):\n def __new__(meta, name, bases, attrs):\n name = _cls.__name__\n attrs = _cls.__dict__\n bases = _cls.__bases__\n return meta_cls.__new__(meta, name, bases, attrs)\n metaclass_decorator.__name__ = '__metaclass__'\n class new_cls(_cls):\n __metadata__ = metaclass_decorator\n # We set the __doc__ directly when defining the new class, as to avoid the\n # 'non-writable' issue with __doc__\n # indeed attribute '__doc__' of 'type' objects is not writable:\n # \"AttributeError: attribute '__doc__' of 'type' objects is not writable\"\n # hence new-style classes (child of 'object' type) have non writable docstring\n __doc__ = _doc_formatter(_cls)\n # override new_cls.__init__ to prevent recursion, because new_cls.__init__ \n # is _cls.__init__ and it keeps calling itself.\n # name set after the class declaration\n try:\n new_cls.__name__ = _cls.__name__\n except: pass\n try:\n new_cls.__module__ = _cls.__module__\n except: pass\n return new_cls\n return _class_decorator",
"def find_class(self, class_name: str) -> Type:\n pass",
"def extend_class(cls):\n return lambda f: (setattr(cls, f.__name__, f) or f)"
] | [
"0.7026496",
"0.60254246",
"0.58712715",
"0.58391374",
"0.5762568",
"0.57158124",
"0.55842835",
"0.54794437",
"0.5475729",
"0.54330164",
"0.5426265",
"0.5411975",
"0.53922296",
"0.5384349",
"0.53761977",
"0.5354712",
"0.53032106",
"0.52928644",
"0.5277489",
"0.5265347",
"0.52129525",
"0.52070105",
"0.5167591",
"0.5155891",
"0.5152299",
"0.5136961",
"0.5133253",
"0.50914586",
"0.5088126",
"0.50848246"
] | 0.7357723 | 0 |
Returns true if the class is marked discoverable | def is_class_discoverable(_class, default_discoverability=False):
return bool(getattr(_class, _get_discoverable_attribute(_class),
default_discoverability)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_discoverable_attribute(_class):\n return \"__{}_is_discoverable\".format(_class.__name__)",
"def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class",
"def detect(cls):\n return False",
"def class_is(cls: Class) -> bool:\n pass",
"def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class",
"def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True",
"def class_is_interesting(name: str):\n if name.startswith('org.chromium.'):\n return True\n return False",
"def _detect(self):\n return True",
"def discoverable(self):\n return sa.Column(sa.Boolean(), default=False)",
"def has_interfaces(node):\n if \"interfaces\" in node and len(node[\"interfaces\"]):\n return True\n else:\n return False",
"def has_registered_subclasses(cls: type) -> bool:\n has_subclasses = False\n if issubclass(cls, RegistersSubclasses):\n if cls.registered_subclasses():\n has_subclasses = True\n return has_subclasses",
"def is_concrete(self):\r\n targets = list(self.resolve())\r\n return len(targets) == 1 and targets[0] == self",
"def discovered(self):\n return self._discovered",
"def has_classname(self):\n return self.unpack_word(0x4A) > 0",
"def is_reflective(self):\n return self._reflective",
"def is_harvestable(self, name_path, item):\n name = name_path[-1]\n if (\n name.startswith(\"_\")\n or id(item) in self._seen\n or name in self.excludes\n or self._join_path_names(*name_path) in self.excludes\n ):\n return False\n\n self._seen.add(id(item))\n\n return (\n (callable(item) or is_regular_class(name, item) or inspect.ismodule(item))\n and (not self.base_modules or inspect.getmodule(item) in self.base_modules)\n and (not self.predicate or self.predicate(item))\n )",
"def available(self) -> bool:\n raise NotImplementedError",
"def __contains__(self, name):\n return hasattr(self, name)",
"def has_class(self, name):\n return name in self._cached_class",
"def is_registered(self, type):\n attr = self._type_to_attr(type)\n return getattr(self, attr, None) is not None",
"def has_name(self, name):\n\t\treturn name in self.classes",
"def has_name(self, name):\n return name in self.classes",
"def is_on(self) -> bool:\n raise NotImplementedError(\"Device subclass needs to implement this.\")",
"async def discover(self):\n raise NotImplementedError(\"this is a base class\")",
"def __bool__(self):\n return self.taxonomy.exists",
"def is_heritage_completion(self):\n current_line = self.get_current_line()\n\n match = re.match(r\"class\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before[-1] == \"(\":\n return True\n return False",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover",
"def has_sclass(self, w: Wrapper, prop: Any) -> bool:\n if not prop:\n return None\n props = self.sclasses(w)\n if isinstance(prop, str):\n ans = [prop in props]\n else:\n ans = [i in props for i in prop]\n return all(ans)",
"def is_available(self) -> bool:\n raise NotImplementedError"
] | [
"0.69203067",
"0.6642737",
"0.62106764",
"0.6052726",
"0.5998927",
"0.5970129",
"0.5938582",
"0.5887358",
"0.58857167",
"0.5808782",
"0.57919824",
"0.57619464",
"0.5701386",
"0.5670407",
"0.56579703",
"0.5640263",
"0.5625706",
"0.5594766",
"0.5586563",
"0.5519655",
"0.55124557",
"0.5501693",
"0.54965985",
"0.549473",
"0.5492313",
"0.5487368",
"0.5475668",
"0.54710895",
"0.5470464",
"0.54617584"
] | 0.8230393 | 0 |
Get an attribute to set on a class to consider it discoverable | def _get_discoverable_attribute(_class):
return "__{}_is_discoverable".format(_class.__name__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class",
"def get_attr(self, key: str) -> Any:\n raise NotImplementedError(\"This method should be implemented by \"\n \"subclasses.\")",
"def __getattr__(self, attr):\n if attr in self._det_aliases:\n attr = self._det_aliases[attr]\n if attr in self._detector_dir:\n return self._detectors[self._detector_dir[attr]]\n if attr in self._device_sets:\n self.add_detector(attr)\n return self._detectors[self._detector_dir[attr]]",
"def __getattr__(self, attr):\n return self.get(attr)",
"def get_class_attribute(self):\n return self.class_attr",
"def UseAttribute(self) -> bool:",
"def get_attribute(self, attr):\n super().get_attribute(attr) # Keep this line, it triggers the parent class method.\n return getattr(self, attr)",
"def __getattribute__(self, name: str) -> Optional[Any]:\n\n try:\n return type.__getattribute__(self, name)\n except AttributeError as error:\n try:\n return self.__dict__[\"members\"][name]\n except KeyError:\n raise error",
"def get_attribute(self, attr):\n logger.debug(\"GET ATTRIBUTE {}\".format(attr))",
"def __getattr__(self, attr): # or does it ?\n return self.X[attr]",
"def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class",
"def __getattribute__(self,name):\n try:\n return object.__getattribute__(self,name)\n except AttributeError:\n extraPO = object.__getattribute__(self,'_extraPO')\n\n if hasattr(extraPO,name):\n return getattr(extraPO,name) # HIDDEN!\n\n _attr_err_msg = object.__getattribute__(self,'_attr_err_msg')\n\n raise AttributeError(_attr_err_msg(name,[self,extraPO]))",
"def _fget(self):\n # type: (...) -> Any\n try:\n return getattr(self, private_attr)\n except AttributeError:\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n _get_type_name(type_), attr\n )\n )",
"def get(self, attr):\r\n return self.__dict__.get(attr)",
"def _get(self, name):\n return object.__getattribute__(self, name)",
"def _get(self, name):\n return object.__getattribute__(self, name)",
"def __getattr__(self, attr):\r\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n\t\treturn getattr(self.__instance, attr)",
"def get(self, att):\n return getattr(self, att)",
"def __getattribute__(self, name):\n try:\n return self.gps.__getattribute__(name) \n except:\n return super().__getattribute__(name)",
"def getCustomAttribute(self):\n\t\treturn self.Attribute",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattr__(self, attr):\n return getattr(self.__instance, attr)",
"def __getattribute__(self,attr):\n if attr in super(BaseTransformer,self).__getattribute__('_overrides'):\n return super(BaseTransformer,self).__getattribute__('_'+attr)\n return super(BaseTransformer,self).__getattribute__(attr)"
] | [
"0.71000606",
"0.666556",
"0.6570846",
"0.653001",
"0.63913965",
"0.63654155",
"0.6253668",
"0.620802",
"0.6182992",
"0.6182578",
"0.6147421",
"0.61464113",
"0.61088574",
"0.60911614",
"0.608464",
"0.608464",
"0.6043526",
"0.60277534",
"0.6022185",
"0.6009808",
"0.60057694",
"0.5993818",
"0.5993818",
"0.5993818",
"0.5993818",
"0.5993818",
"0.5993818",
"0.5993818",
"0.5993818",
"0.5989522"
] | 0.74975735 | 0 |
Convert file to raw file. | def convert_to_raw(file):
img = Image.open(file)
    img = img.convert('L') # convert to 8 bits per pixel
(x, y) = img.size
pixels = bytearray(list(img.getdata()))
filename, file_extension = os.path.splitext(file)
file2 = file.replace(file_extension, '.dat')
file_name = str(x) + 'x' + str(y) + 'x8x1' + '_' + file2
# print(file_name)
with open(file_name, 'wb') as f:
f.write(pixels)
return file_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def raw_convert_file():\n try:\n str_out = io.BytesIO()\n str_out.write(process_text(request.data.decode('utf-8')).encode('utf-8'))\n str_out.seek(0)\n\n return send_file(\n str_out,\n attachment_filename='result.txt',\n as_attachment=True,\n mimetype='text/plain'\n )\n except:\n return make_response('', 400)",
"def read_raw(file_path):\n file = open(file_path, 'rb')\n content = file.read()\n file.close()\n return content",
"def convert_file(self, file_path: str) -> None:\n print(\n f\"Converting {os.path.split(file_path)[1]} to {self.output_format}\")\n\n output = AudioSegment.from_file(file_path, format=self.output_format)\n output.export(file_path.replace(self.input_format,\n f'{self.output_format}'), format=self.output_format)",
"def raw_body(self):\n return file_ops.read_utf8(self.raw_file_name)",
"async def transform(self, file):\n\t\tpass",
"def fileread(self, filename):\n data = None\n f = open(filename, 'r')\n data = f.read()\n f.close()\n try:\n data = data.decode()\n except (UnicodeDecodeError, AttributeError):\n data = data.encode(\"utf-8\")\n\n return data",
"def decode(self, file):\n raise NotImplementedError()",
"def _convert(self, filepath):\n raise NotImplementedError()",
"def to_file(self, file_path, smirnoff_data):\n pass",
"def _toFile(self):\n pass",
"def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw",
"def read_file(input_file):\n\n\ttext = open(input_file)\n\traw = text.read()\n#\tdecoded = raw.decode('utf8').encode('ascii', 'replace')\n\tdecoded = raw.decode('utf8')\n\n\t#moves this through the html cleaner\n\ttext = plaintext(decoded)\n\n\treturn text",
"def save_raw(self, filename, typ):\n self.lib.SaveAsRaw(ct.c_char_p(str.encode(filename)),\n ct.c_int(self.savetypes[typ]))",
"def read_raw(self, filename, ignore_data=False, open_dataset=None):\n return super().read_raw(filename, ignore_data=False, open_dataset=None)",
"def convert(file):\n extension = file[-4:]\n if extension == '.wav':\n return file\n if not exists(file):\n raise IOError('%s file not found' % file)\n if not extension in SUPPORTED_EXTENSION:\n raise IOError('%s file format is not supported' % file)\n if not exists(CONVERTION_DIRECTORY):\n makedirs(CONVERTION_DIRECTORY)\n filename = splitext(basename(file))[0]\n path = join(CONVERTION_DIRECTORY, filename + '.wav')\n if (not exists(path)):\n logging.info(\"Converting file %s\" % file)\n CONVERTERS[extension](file).export(path, format='wav')\n return path",
"def raw_file_structure(self):\n return None",
"def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()",
"def file_converter(self, **kwds):\n if (self.reformat == 'zarr'):\n # output zarr file\n self.HDF5_to_zarr(**kwds)\n elif (self.reformat == 'HDF5'):\n # output rechunked HDF5 file\n self.HDF5_to_HDF5(**kwds)\n # elif (reformat == 'JPL'):\n # # output JPL captoolkit formatted HDF5 files\n # self.HDF5_to_JPL_HDF5(**kwds)\n elif self.reformat in ('csv','txt'):\n # output reduced files to ascii formats\n self.HDF5_to_ascii(**kwds)\n elif self.reformat in ('dataframe',):\n # output reduced files to pandas dataframe\n return self.HDF5_to_dataframe(**kwds)\n else:\n raise ValueError(f'Unknown format {self.reformat}')",
"def processed_to_raw_path(self, processed_path):\n # Extract useful information from <path>\n stage, hash_dir, cloud_id = \\\n osp.splitext(processed_path)[0].split('/')[-3:]\n\n # Remove the tiling in the cloud_id, if any\n base_cloud_id = self.id_to_base_id(cloud_id)\n\n # Read the raw cloud data\n raw_ext = osp.splitext(self.raw_file_names_3d[0])[1]\n raw_path = osp.join(self.raw_dir, base_cloud_id + raw_ext)\n\n return raw_path",
"def raw_to_xml(self):\n xmlfilename = self.logfilename.replace('.raw','.xml')\n fout = codecs.open(xmlfilename, encoding=\"utf-8\", mode=\"w\")\n for line in codecs.open(self.logfilename,encoding=\"utf-8\"):\n fout.write(sanitize(line))\n\n fout.close()\n return xmlfilename",
"def read_raw(self):\n return self._FITS.read_raw()",
"def read_raw_from_file(fname):\n with open(fname) as fh:\n content = fh.read()\n return parse_raw_string(content)",
"def file_data(self):\n return self.read(self.file)",
"def open_raw(path):\r\n raw_file_reader = RawFileReader.RawFileReaderAdapter.FileFactory(path)\r\n raw_file_reader.SelectInstrument(Business.Device.UV, 1)\r\n return raw_file_reader",
"def read (self, file):\n\t\tself.unpack (file.read (self.size()))",
"def write_raw(content, file_path):\n file = open(file_path, 'wb')\n file.write(content)\n file.close()",
"def readfile(fname, mode='rb'):\n f = open(fname, mode)\n raw = f.read()\n f.close()\n return raw",
"def raw_content(self, file_name):\n if file_name not in self._raw_content:\n raise FileNotAnalyzed(file_name)\n return self._raw_content[file_name]",
"def getraw_encoded(self):\n # update data model\n self.dataModel.setTestData( testData=self.srcEditor.text() )\n\n # return raw file\n return self.dataModel.getRaw()",
"def get_binary(self, filepath):\n with open(filepath, \"rb\") as f:\n return b64encode(f.read()).decode('utf-8')"
] | [
"0.64115816",
"0.6371769",
"0.6140477",
"0.5940787",
"0.5906996",
"0.57152784",
"0.562413",
"0.5623284",
"0.56161326",
"0.5604619",
"0.55842215",
"0.5559293",
"0.55592465",
"0.5539912",
"0.54099935",
"0.5399127",
"0.53941727",
"0.5383642",
"0.5368682",
"0.53505486",
"0.5349888",
"0.5343599",
"0.5336461",
"0.5306005",
"0.52952987",
"0.5294561",
"0.5293032",
"0.52866715",
"0.5282483",
"0.5281433"
] | 0.6456368 | 0 |
Convert a raw file to jpg file. | def convert_to_jpg(raw_file):
match = re.match('(\d+)x(\d+)x(\d+)x(\d+)_(\w+)', raw_file)
if match:
# print(match.group(1))
# print(match.group(2))
# print(match.group(3))
# print(match.group(4))
# print(match.group(5))
x = int(match.group(1))
y = int(match.group(2))
bpp = int(match.group(3))
dimension = int(match.group(4))
filename = match.group(0)
rawData = open(raw_file, 'rb').read()
imgSize = (x, y)
# Use the PIL raw decoder to read the data.
        # the 'F;16' mode informs the raw decoder that we are reading
        # little-endian, unsigned 16-bit integer data.
# img = Image.fromstring('L', imgSize, rawData, 'raw', 'F;32')
img = Image.frombuffer('L', imgSize, rawData, 'raw')
img = img.rotate(180)
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img.save(filename + ".jpg") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_image_conversion_for_image_file(self):\n # Given\n with open(self.image_file_path) as f:\n # When\n result = convert_image_to_jpeg(f)\n img = Image.open(result)\n # Then\n self.assertEqual(JpegImageFile.format, img.format)\n self.assertEqual(self._convert_file(self.image_file_path, JpegImageFile.format).getvalue(),\n result.getvalue())",
"def convert_to_jpg_then_compress(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name).replace('.png', '.jpg')\n\n\t\timage = Image.open(self.full_path)\n\t\timage.save(self._compressed_save_path)\n\n\t\timage = Image.open(self._compressed_save_path)\n\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)",
"def convert_to_jpg(filepath):\n\n f, e = os.path.splitext(filepath)\n outfile = f + \".jpg\"\n # We don't want to delete .JPG or .JPEG so we lower the extension\n filepath_lowered = f + e.lower()\n if outfile != filepath_lowered:\n try:\n image = Image.open(filepath)\n\n # Rotate image: https://stackoverflow.com/a/6218425\n # for orientation in ExifTags.TAGS.keys() :\n # if ExifTags.TAGS[orientation] == 'Orientation' : break\n orientation = 274 # get 274 through upper loop\n try:\n exif=dict(image._getexif().items())\n print(exif[orientation])\n if exif[orientation] == 3 :\n image=image.rotate(180, expand=True)\n elif exif[orientation] == 6 :\n image=image.rotate(270, expand=True)\n elif exif[orientation] == 8 :\n image=image.rotate(90, expand=True)\n except (AttributeError, KeyError): # image has no meta data\n pass\n\n image.convert('RGB').save(outfile)\n except IOError:\n print(\"cannot convert\", filepath)\n exit(1)\n os.remove(filepath)\n return outfile\n return filepath",
"def convert_to_raw(file):\n\n img = Image.open(file)\n img = img.convert('L') # convert to 8 bits per pixels\n (x, y) = img.size\n\n pixels = bytearray(list(img.getdata()))\n\n filename, file_extension = os.path.splitext(file)\n file2 = file.replace(file_extension, '.dat')\n file_name = str(x) + 'x' + str(y) + 'x8x1' + '_' + file2\n\n # print(file_name)\n\n with open(file_name, 'wb') as f:\n f.write(pixels)\n\n return file_name",
"def encode_jpg(track_metadata):\n\tprint(\"---- Encoding\", track_metadata.file_name, \"to JPG...\")\n\tnew_file_name = track_metadata.file_name + \".jpg\"\n\tshutil.copy(track_metadata.file_name, new_file_name) #Work only on a copy.\n\tect_command = [\"/home/ruben/encoding/Efficient-Compression-Tool/build/ect\", \"-9\", \"-strip\", \"--mt-deflate\", new_file_name]\n\tprint(ect_command)\n\tprocess = subprocess.Popen(ect_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif(exit_code != 0): #0 is success.\n\t\traise Exception(\"ECT failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\t#Delete old file.\n\tif os.path.exists(track_metadata.file_name):\n\t\tos.remove(track_metadata.file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"jpg\"",
"def convert_chunks_to_jpeg(raw_chunks_dir, jpeg_quality=95,\n slicing_plane=\"xy\"):\n with open(\"info\") as f:\n info = json.load(f)\n for scale_index in range(len(info[\"scales\"])):\n make_jpeg_chunks(info, scale_index,\n raw_chunks_dir,\n jpeg_quality=jpeg_quality,\n slicing_plane=slicing_plane)",
"def JPGtoPNGConverter(source, dest):\n files = os.listdir(f\"./{source}\")\n if not os.path.exists(f\"./{dest}\"):os.makedirs(f\"./{dest}\")\n\n for file in files:\n if os.path.splitext(file)[-1] == \".jpg\":\n img = Image.open(f\"./{source}/{file}\")\n clean_text = os.path.splitext(file)[0]\n img.save(f\"./{dest}/{clean_text}.png\",\"png\")\n else:\n print(f\"Your filename: {file} is not in .JPG format !!\")\n return \"All files converted successfully :) \"",
"def convert(filename):\n fullname = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], filename)\n base, ext = filename.rsplit('.', 1)\n if ext not in app.config['DISPLAY_EXTENSIONS']:\n # Convert to jpeg using imagemagick. Could use the python bindings, but this\n # will do for now\n output_file = base + '.jpg'\n full_output = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], output_file)\n convert_params = ['convert' , '-density', '300', fullname, full_output]\n if subprocess.call(convert_params):\n # Returns 0 on success\n raise ConvertError(\"Error during format conversion\", [fullname, full_output])\n\n os.remove(fullname)\n filename = output_file\n fullname = full_output\n\n # Size conversion is non-optional\n size = \"%dx%d\" % (app.config['MAX_X'], app.config['MAX_Y'])\n\n colour = request.form.get('colour', 'black')\n if colour not in ['black', 'white', 'green']:\n colour = 'black'\n if colour == 'green':\n colour = '#2c882e'\n\n convert_params = ['mogrify' , '-resize', size + '>', fullname]\n if subprocess.call(convert_params):\n raise ConvertError(\"Error during resize\", [fullname])\n\n convert_params = ['mogrify' , '-gravity', 'center', '-extent', size, '-background', colour, fullname]\n if subprocess.call(convert_params):\n raise ConvertError(\"Error during extent change\", [fullname])\n\n return filename",
"def convert_to_jpg(directory, image_to_convert):\n extension = '.jpg'\n print(image_to_convert)\n split_im = image_to_convert.split('.')\n image_name = split_im[0]\n print(image_name)\n jpg_image = directory+image_name+extension\n os.rename(directory+image_to_convert, jpg_image)\n return",
"def image(self):\n # TODO: make sure this method works for png, gif, tiff\n if self.has_metadata:\n self.extract_metadata()\n tempdir_path = self.make_tempdir()\n tempfile_path = os.path.join(tempdir_path, self.filename)\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try: # Do image conversions\n img_in = Image.open(self.src_path)\n img_out = Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n img_out.save(tempfile_path)\n self.src_path = tempfile_path\n except Exception as e: # Catch decompression bombs\n # TODO: change this from all Exceptions to specific DecompressionBombWarning\n self.add_error(e, \"Caught exception (possible decompression bomb?) while translating file {}.\".format(self.src_path))\n self.make_dangerous()\n self.add_file_string('Image file')\n self.set_property('processing_type', 'image')",
"def convert_to_jpeg(image, quality=90):\n\n\t# Load the image into a new BytesIO\n\tsImg = BytesIO(image)\n\n\t# Create an empty BytesIO for the new image\n\tsNewImg = BytesIO(b'')\n\n\t# Create a new Pillow instance from the raw data\n\toImg = Pillow.open(sImg)\n\n\t# If the mode is not valid\n\tif oImg.mode not in ('1','L','RGB','RGBA'):\n\t\toImg = oImg.convert('RGB');\n\n\t# Save the new image as a JPEG\n\toImg.save(sNewImg, 'JPEG', quality=quality, subsampling=0)\n\n\t# Pull out the raw string\n\tsRet = sNewImg.getvalue()\n\n\t# Close the image\n\toImg.close()\n\n\t# Return the new image\n\treturn sRet",
"def raw_audio_to_jpgs(data, target, sample_dir, bird_index, rec_index,\n cutoff=0.25, min_snips=None,\n snip_length=4, rate=44100, num_jitters=None, jitter=0.25):\n snippets, logs = snip_audio(data, snip_length=snip_length, cutoff=cutoff,\n min_snips=min_snips, num_jitters=num_jitters,\n jitter=jitter, rate=rate)\n first = True\n for i, collection in enumerate(snippets):\n for j, snip in enumerate(collection):\n if first:\n # raw_to_wav then write to file in 'sample_dir'\n raw_to_wav(snip, os.path.join(sample_dir, str(rec_index) + '.wav'))\n first = False\n spectrograms = compute_spectrograms(snip)\n for k in range(len(spectrograms[0])):\n start_time = float(logs[i][2])/rate\n label = str(bird_index) + '_' + str(rec_index)\n label += '_i%d_%dp%d_c%d' % (i, int(start_time),\n int((start_time % 1)*10),\n int(100.*logs[i][0]))\n path = spectrogram_to_jpg(spectrograms[0][k], label, target=target)",
"def pic_format(file_head: bin):\n res = \"unknown\"\n if b'\\xff\\xd8\\xff' in file_head:\n res = 'jpg'\n elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head:\n res = 'png'\n\n return res",
"def _jpeg(self, tile: bytes) -> np.ndarray:\n jpeg_tables = self.JPEGTables\n jpeg_table_bytes = struct.pack(\n f\"{self._file_reader._endian}{jpeg_tables.count}{jpeg_tables.tag_type.format}\",\n *self.JPEGTables.value,\n )\n # # https://github.com/mapbox/COGDumper/tree/master/cogdumper\n if jpeg_table_bytes:\n if tile[0] == 0xFF and tile[1] == 0xD8:\n # insert tables, first removing the SOI and EOI\n tile = tile[0:2] + jpeg_table_bytes[2:-2] + tile[2:]\n else:\n raise Exception(\"Missing SOI marker for JPEG tile\")\n decoded = imagecodecs.jpeg_decode(tile)\n return np.rollaxis(decoded, 2, 0)",
"def toImage(self,b,name):\n savePath = join(self.path,name+\".jpeg\")\n if not os.path.exists(savePath):\n h = self.h\n im = self.toArray(b,h)\n x = Image.fromarray(im)\n x.save(savePath,\"JPEG\")\n del im\n del x\n return savePath",
"def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)",
"def create_compressed_file(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name)\n\t\tself._is_png = 'png' in self.file_extension\n\t\tself._is_jpg = 'jpg' in self.file_extension\n\n\t\timage = Image.open(self.full_path)\n\n\t\tif self._is_png:\n\t\t\timage.save(self._compressed_save_path, quality=85, optimize=False, compress_level=9)\n\t\telif self._is_jpg:\n\t\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\t\telse:\n\t\t\tprint('Non-recognized asset format!!')\n\t\t\texit()\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)",
"def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img",
"def convert(inputpath, targetformat):\n outputpath = os.path.splitext(inputpath)[0] + targetformat\n print(\"converting {0} to {1}\".format(inputpath, outputpath))\n\n reader = imageio.get_reader(inputpath)\n fps = reader.get_meta_data()['fps']\n\n writer = imageio.get_writer(outputpath, fps=fps)\n for i, im in enumerate(reader):\n sys.stdout.write(\"\\rframe {0}\".format(i))\n sys.stdout.flush()\n writer.append_data(im)\n print(\"Finalizing...\")\n writer.close()\n print(\"Done.\")",
"def jpg2rgb(image_data: bytes) -> np.ndarray:\n\n im = Image.open(io.BytesIO(image_data))\n im = im.convert(\"RGB\")\n im = im.resize((96, 96))\n data = np.array(im)\n\n data = rgb2gray(data)\n\n return data",
"def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width",
"def to_image_file(self, path, params=None):\n assert(isinstance(path, str))\n assert(len(path) > 0)\n\n if params is not None:\n assert(isinstance(params, dict))\n return cv2.imwrite(path, self.to_numpy(), **params)\n\n return cv2.imwrite(path, self.to_numpy())",
"def read_image(filepath, format=None):\n image = Image.open(filepath)\n\n # capture and ignore this bug:\n # https://github.com/python-pillow/Pillow/issues/3973\n try:\n image = ImageOps.exif_transpose(image)\n except Exception:\n pass\n\n if format is not None:\n # PIL only supports RGB, so convert to RGB and flip channels over below\n conversion_format = format\n if format == \"BGR\":\n conversion_format = \"RGB\"\n image = image.convert(conversion_format)\n image = np.asarray(image)\n if format == \"BGR\":\n # flip channels if needed\n image = image[:, :, ::-1]\n # PIL squeezes out the channel dimension for \"L\", so make it HWC\n if format == \"L\":\n image = np.expand_dims(image, -1)\n return image",
"def parser_image_data(jpeg_file_path):\n image = tf.io.read_file(jpeg_file_path)\n image = tf.image.decode_jpeg(image)\n image = tf.image.resize(image, [image_height, image_width])\n image = tf.cast(image, dtype=tf.float32)\n image = (image / 127.5) - 1.0\n return image",
"def jpg(self, id, **params):\n if 'async' in params:\n params.pop('async')\n self.request('/encoded_video/' + str(id) + '/thumbnails', 'POST', body=urllib.urlencode(params))\n return True\n\n if len(params) > 0:\n params = '?' + urllib.urlencode(params)\n else:\n params = ''\n\n return self.request('/encoded_video/' + str(id) + '.jpg' + params)",
"def pdf_to_jpeg(job):\n source_file, output_folder, first_page, last_page, output_file, poppler_path = job\n\n image_list = convert_from_path(\n source_file,\n dpi=200,\n first_page=first_page,\n last_page=last_page,\n fmt='jpeg',\n output_file=output_file,\n output_folder=output_folder,\n paths_only=True,\n jpegopt=dict(quality=100, optimize=True),\n poppler_path=poppler_path,\n )\n\n return image_list",
"def convert_and_save_image(image, path):\n img = Image.open(image)\n r, g, b, a = img.split()\n img = Image.merge(\"RGB\", (r, g, b))\n image_name = image.split(\".\")[0].split('/')[-1]\n img.save(f'{path}/{image_name}.bmp')\n img.close()",
"def convert_photo(link):\n\n image = open(link, \"rb\") #Open binary file in read-only mode\n image_read = image.read()\n image_base64 = base64.b64encode(image_read)\n\n return image_base64",
"def image_conversion(source_path, dest_fmt, wipe=False):\n\n from numpy import array_equal\n from os import remove\n\n # the name of the file before format extension\n source_name = source_path.split(\".\")[0]\n\n dest_path = source_name + \".\" + dest_fmt\n source_image = read_image(source_path)\n write_image(dest_path, source_image)\n\n if wipe:\n check_image = read_image(dest_path)\n if array_equal(check_image, source_image):\n remove(source_path)\n else:\n print('{0} and {1} differ... something went wrong!'.format(source_path, dest_path))",
"def preprocess_image(filename):\n\n image_string = tf.io.read_file(filename)\n image = tf.image.decode_jpeg(image_string, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.resize(image, target_shape)\n return image"
] | [
"0.64591265",
"0.62869",
"0.5926924",
"0.5918034",
"0.56613654",
"0.5598152",
"0.5569632",
"0.5559631",
"0.5479805",
"0.54383266",
"0.54034483",
"0.5335417",
"0.53307116",
"0.52239347",
"0.51870555",
"0.51752836",
"0.5163474",
"0.5141427",
"0.51402164",
"0.5122234",
"0.50765324",
"0.50517464",
"0.50226325",
"0.50074995",
"0.49977887",
"0.49945647",
"0.49887472",
"0.49866197",
"0.4964314",
"0.49611914"
] | 0.6899732 | 0 |
Wrapper function on top of the interpolation executable. It is also a benchmarking function: it returns the name of the output image and the time needed to run all the iterations | def interpolate(file_in, file_out, device, iterations, interpolation_type, new_width, new_height):
command_string = './Interpolate ' + device + ' ' + str(iterations) + ' ' + interpolation_type + ' ' + file_in + ' ' + file_out + ' ' + str(new_width) + ' ' + str(new_height)
program_out = str(subprocess.check_output(command_string.split(), stderr=subprocess.STDOUT), 'utf-8')
    print(program_out) # can be commented out to avoid output pollution
program_out = program_out.splitlines()
    # Note: the elapsed time and the output file name are on lines 8 and 9 of the output, respectively
seconds = float(program_out[8])
out_file = program_out[9]
return (seconds, out_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)",
"def Resampler(name):\n\n def resample_average(path, dsquery, dstile, image_format):\n for i in range(1, dstile.RasterCount+1):\n res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i), \"average\")\n if res != 0:\n raise ImageOutputException(\"RegenerateOverview() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n def resample_antialias(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n array = numpy.zeros((querysize, querysize, 4), numpy.uint8)\n for i in range(dstile.RasterCount):\n array[:,:,i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1), 0, 0, querysize, querysize)\n im = Image.fromarray(array, 'RGBA') # Always four bands\n im1 = im.resize((tilesize,tilesize), Image.ANTIALIAS)\n\n if os.path.exists(path):\n im0 = Image.open(path)\n im1 = Image.composite(im1, im0, im1)\n\n ensure_dir_exists(path)\n\n if image_format == \"JPEG\":\n im1.save(path, image_format, quality=jpeg_quality)\n else:\n im1.save(path, image_format)\n\n\n if name == \"average\":\n return resample_average\n elif name == \"antialias\":\n return resample_antialias\n\n resampling_methods = {\n \"near\" : gdal.GRA_NearestNeighbour,\n \"bilinear\" : gdal.GRA_Bilinear,\n \"cubic\" : gdal.GRA_Cubic,\n \"cubicspline\" : gdal.GRA_CubicSpline,\n \"lanczos\" : gdal.GRA_Lanczos\n }\n\n resampling_method = resampling_methods[name]\n\n def resample_gdal(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n dsquery.SetGeoTransform( (0.0, tilesize / float(querysize), 0.0, 0.0, 0.0, tilesize / float(querysize)) )\n dstile.SetGeoTransform( (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) )\n\n res = gdal.ReprojectImage(dsquery, dstile, None, None, resampling_method)\n if res != 0:\n raise ImageOutputException(\"ReprojectImage() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n return resample_gdal",
"def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()",
"def ImageOutput(name, out_ds, tile_size, resampling, init_dest, output_dir, verbose,mbtiles):\n\n resampler = Resampler(resampling)\n\n if name == \"hybrid\":\n return HybridImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose)\n\n if name == \"png\":\n image_format = \"PNG\"\n elif name == \"jpeg\":\n image_format = \"JPEG\"\n\n return SimpleImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose, [image_format],mbtiles)",
"def compute(swatch, image, dir, out):\r\n\tif out:\r\n\t\tprint_to = open(out.encode('utf-8'), 'w')\r\n\telse:\r\n\t\tprint_to = sys.stdout\r\n\tif image:\r\n\t\tcompute_results(image, swatch, print_to)\r\n\telif dir:\r\n\t\tcompute_results(dir, swatch, print_to)",
"def run_script(input_dir, output_dir, output_file, bstp_num):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n print(\"\"\" Load results from step 1 & 2 \"\"\")\n start_0 = time.time()\n data_dim_file_name = output_dir + \"/temp/data_dim.mat\"\n mat = loadmat(data_dim_file_name)\n data_dim = mat['data_dim']\n data_dim = np.array([int(i) for i in data_dim[0, :]])\n n, l, m, p, g, g_num = data_dim\n y_design_file_name = output_dir + \"/temp/y_design.mat\"\n mat = loadmat(y_design_file_name)\n y_design = mat['y_design']\n resy_design_file_name = output_dir + \"/temp/resy_design.mat\"\n mat = loadmat(resy_design_file_name)\n resy_design = mat['resy_design']\n efit_eta_file_name = output_dir + \"/temp/efit_eta.mat\"\n mat = loadmat(efit_eta_file_name)\n efit_eta = mat['efit_eta']\n esig_eta_file_name = output_dir + \"/temp/esig_eta.mat\"\n mat = loadmat(esig_eta_file_name)\n esig_eta = mat['esig_eta']\n hat_mat_file_name = output_dir + \"/temp/hat_mat.mat\"\n mat = loadmat(hat_mat_file_name)\n hat_mat = mat['hat_mat']\n snp_file_name = output_dir + \"/temp/snp.mat\"\n mat = loadmat(snp_file_name)\n snp = mat['snp']\n # read the image size\n img_size_file_name = input_dir + \"img_size.txt\"\n img_size = np.loadtxt(img_size_file_name)\n img_size = np.array([int(i) for i in img_size])\n # read the image index of non-background region\n img_idx_file_name = input_dir + \"img_idx.txt\"\n img_idx = np.loadtxt(img_idx_file_name)\n img_idx = np.array([int(i) for i in img_idx])\n end_0 = time.time()\n print(\"Elapsed time in Step 3 is \", end_0 - start_0)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n print(\"\"\" Step 3. Significant locus-voxel and locus-subregion detection \"\"\")\n start_3 = time.time()\n alpha = 1e-5\n c_alpha = -10**alpha\n bstp_num = int(bstp_num)\n max_stat_bstp, max_area_bstp = wild_bstp(snp, y_design, resy_design, efit_eta, esig_eta, hat_mat,\n img_size, img_idx, c_alpha, g_num, bstp_num)\n print(max_stat_bstp)\n print(max_area_bstp)\n bstp_out = np.hstack((max_stat_bstp, max_area_bstp))\n bstp_out_file_name = output_dir + output_file\n np.savetxt(bstp_out_file_name, bstp_out)\n end_3 = time.time()\n print(\"Elapsed time in Step 3 is \", end_3 - start_3)",
"def multi_run_wrapper(args):\n\treturn img_preprocessing(*args)",
"def __call__(self, x, y):\n #- TODO: compare speed to solution at\n #- http://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python\n \n #- Find where we are in grid\n #- clip to 1 because we will use i and i-1\n #- clip to len(x)-1 to allow extrapolation beyond grid boundary\n ix = np.searchsorted(self.x, x).clip(1, len(self.x)-1)\n iy = np.searchsorted(self.y, y).clip(1, len(self.y)-1)\n \n #- Interpolation distances from points\n dx = (x - self.x[ix-1]) / (self.x[ix] - self.x[ix-1])\n dy = (y - self.y[iy-1]) / (self.y[iy] - self.y[iy-1])\n\n #- Interpolate, allowing x and/or y to be multi-dimensional\n #- NOTE: these are the slow steps, about equal time each\n \n #- Original code with what appears to be vestigial transposes\n # data1 = (self.data[ix-1,iy-1].T*(1-dx) + self.data[ix,iy-1].T*dx).T\n # data2 = (self.data[ix-1,iy].T*(1-dx) + self.data[ix,iy].T*dx).T\n # dataxy = (data1.T*(1-dy) + data2.T*dy).T\n\n #- Updated without transposes\n data1 = (self.data[ix-1,iy-1]*(1-dx) + self.data[ix,iy-1]*dx)\n data2 = (self.data[ix-1,iy]*(1-dx) + self.data[ix,iy]*dx)\n dataxy = (data1*(1-dy) + data2*dy)\n\n return dataxy",
"def get_resample(name: str) -> str:\n\n methods = {\n \"first\":\n \"\"\"\nimport numpy as np\n\ndef first(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in reversed(range(len(in_ar))):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"last\":\n \"\"\"\nimport numpy as np\n\ndef last(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in range(len(in_ar)):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"max\":\n \"\"\"\nimport numpy as np\n\ndef max(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.max(in_ar, axis=0)\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"average\":\n \"\"\"\nimport numpy as np\n\ndef average(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n div = np.zeros(in_ar[0].shape)\n for i in range(len(in_ar)):\n div += (in_ar[i] != 0)\n div[div == 0] = 1\n \n y = np.sum(in_ar, axis = 0, dtype = 'uint16')\n y = y / div\n \n np.clip(y,0,255, out = out_ar)\n\"\"\"}\n\n if name not in methods:\n raise ValueError(\n \"ERROR: Unrecognized resampling method (see documentation): '{}'.\".\n format(name))\n\n return methods[name]",
"def main(fn_input, fn_output):\n # read file\n inter = Interpolator()\n inter.read_file(fn_input)\n inter.write_interpolated(fn_output)",
"def main(filename, iterations, save_diagnostics, output_dir, burnin):\n #data = []\n #with open(filename,'rb') as json_data:\n #skip header\n #jsondata = json.load(json_data)\n #j=0\n #while j<271:\n #eruption_time = jsondata[j]['FIELD1']\n #waiting_time = jsondata[j]['FIELD2']\n #data.append([float(eruption_time), float(waiting_time)])\n #j=j+1\n\n #generate ida images\n data = np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1
,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[17,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4
,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,37,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],
[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,40,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[
1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,
4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,
14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,
2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2
,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1
],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1]
,[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[
2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1],[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,1
5,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],
[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2],[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],
[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3
,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1]
,[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6
,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[
10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])\n #data = 
np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[2230,5,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[1
7,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[1263,4,1],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,3
7,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,4
0,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,965,1303],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],
[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,
17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1257,55],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,
1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39
,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,965,1303],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,
1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,
1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],
[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1]
,[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,15,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2
,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2]
,[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9
,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4
,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1],[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5
,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134
],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6
,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])\n #data = 
np.array([[26, 2], [18, 3], ...])  # truncated data dump: the source contained several thousand [int, int] pairs here as one flattened array literal, cut off mid-entry; the full contents are not recoverable from this extract
2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,10],[1,3],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,43],[1,23],[1,2],[1,4],[1,33],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,7],[1,2],[1,4],[1,6],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,136],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,20],[2,1],[2,1],[2,16],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,2],[2,114],[2,1],[2,3],[2,4],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,6],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,2],[2,4],[2,3],[2,2],[2,1],[3,2],[3,1],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,8],[3,2],[3,1],[3,2],[3,28],[3,1],[3,118],[3,1],[3,1],[3,2],[3,2],[3,3],[3,8],[3,3],[4,1],[4,2],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[5,2],[5,1],[5,6],[5,1],[5,4],[5,2],[5,4],[5,1],[5,4],[6,4],[6,1],[6,3],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,46],[7,2],[7,1],[8,3],[8,6],[8,1],[8,5],[9,12],[9,1],[9,5],[10,3],[10,3],[11,3],[11,7],[12,3],[12,1],[12,1],[13,1],[13,1],[13,2],[13,13],[13,1],[14,1],[14,1],[58,2],[112,1],[18,3],[19,1],[20,1],[18,1],[15,2],[92,1],[50,1],[40,1],[57,5],[19,2],[19,1],[15,4],[16,5],[54,1],[15,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,6],[1,7],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,3],[1,6],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,12],[1,1],[1,1],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,2],[1,8],[1,2],[1,1],[1,1],[1,2],[1,1],[1,19],[1,1],[1,1],[1,4],[1,1],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,3],[1,9],[1,26],[1,3],[1,17],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,8],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,30],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,2],[2,3],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,10],[2,4],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,7],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,29],[3,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[4,1],[5,2],[5,1],[5,1],[5,4],[5,1],[5,1],[5,2],[5,1],[5,1],[5,3],[6,4],[6,1],[6,1],[6,3],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[7,2],[7,3],[7,2],[7,1],[7,2],[8,1],[8,1],[8,4],[8,1],[8,3],[9,1],[9,5],[9,1],[9,1],[9,1],[11,1],[11,2],[11,2],[11,3],[12,7],[12,1],[13,1],[14,2],[16,1],[78,3],[17,3],[27,3],[19,2],[67,3],[16,3],[58,3],[17,1],[29,2],[29,1],[23,1],[390,2],[75,2],[26,8],[20,3],[19,2],[16,4],[33,1],[66,2],[20,1],[17,5],[1,1],[1,2],[1,1],[1,1],[1,9],[1,4],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,4],[1,5],[1,11],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1,3],[1,1],[1,1],[1,3],[1,1],[1,7],[1
,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,8],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,6],[1,2],[1,1],[1,11],[1,3],[1,1],[1,2],[1,4],[1,4],[1,1],[1,11],[1,7],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,6],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,7],[1,5],[1,2],[1,7],[1,7],[1,1],[1,3],[1,2],[1,4],[1,4],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,3],[1,1],[1,124],[1,2],[1,6],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,5],[2,21],[2,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,31],[2,1],[2,2],[2,4],[2,1],[2,3],[2,125],[2,1],[2,8],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,8],[2,1],[2,12],[2,278],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[4,2],[4,8],[4,1],[4,3],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[5,1],[5,1],[5,1],[5,2],[5,2],[5,2],[5,1],[6,2],[6,2],[6,24],[6,2],[6,2],[6,20],[6,1],[6,1],[6,3],[6,1],[6,4],[6,5],[6,3],[7,2],[7,1],[7,4],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,134],[8,1],[8,1],[8,5],[8,1],[8,6],[9,3],[9,15],[10,4],[10,3],[10,1],[11,12],[11,2],[12,2],[12,2],[14,1],[14,6],[15,3],[30,2],[35,1],[28,1],[111,1],[22,1],[25,1],[18,1],[40,4],[58,1],[295,4],[18,3],[35,1],[16,1],[1,1],[1,1],[1,2],[1,1],[1,6],[1,6],[1,2],[1,1],[1,301],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,5],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,17],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,23],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,15],[1,4],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,2],[2,7],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,6],[2,1],[2,1],[2,46],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,4],[2,3],[3,11],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,2],[3,2],[3,2],[3,1],[3,3],[3,1],[3,2],[3,2],[3,4],[3,1],[3,45],[3,2],[4,11],[4,2],[4,1],[4,2],[4,4],[4,14],[4,4],[4,2],[4,2],[4,1],[5,3],[5,1],[5,1],[5,2],[5,1],[5,2],[5,3],[5,2],[5,1],[5,2],[5,2],[6,1],[6,1],[6,3],[6,2],[6,1],[6,3],[6,1],[6,6],[7,1],[7,2],[7,1],[8,1],[8,2],[8,1],[8,1],[8,1],[8,2],[8,2],[8,2],[9,5],[9,2],[10,1],[10,1],[10,3],[11,8],[11,1],[12,5],[12,1],[14,1]])\n #data = 
np.array([[26,2],[18,3],[30,4],[19,2],[21,1],[40,1],[17,3],[20,3],[19,3],[15,4],[246,1],[57,2],[16,2],[44,101],[31,1],[19,2],[35,2],[25,1],[28,1],[82,1],[52,11],[19,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,4],[1,1],[1,7],[1,9],[1,1],[1,2],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,13],[1,1],[1,4],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,3],[1,37],[1,1],[1,2],[1,1],[1,1],[1,50],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,1],[1,6],[1,2],[1,3],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[2,3],[2,3],[2,1],[2,1],[2,1],[2,4],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,2],[2,1],[2,13],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,8],[2,3],[2,1],[2,1],[2,13],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[3,1],[3,2],[3,5],[3,1],[3,1],[3,11],[3,3],[3,1],[3,1],[3,6],[3,1],[3,3],[3,1],[3,2],[3,4],[3,2],[3,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,2],[4,9],[4,1],[4,1],[4,5],[4,1],[4,16],[4,1],[4,2],[4,1],[4,1],[4,1],[4,6],[4,2],[4,2],[5,2],[5,2],[5,2],[5,2],[5,3],[5,1],[6,3],[6,1],[6,4],[6,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,7],[8,1],[8,1],[9,1],[9,3],[9,2],[9,1],[10,1],[10,11],[11,1],[11,2],[12,4],[13,11],[13,2],[14,3],[22,1],[39,3],[107,1],[46,6],[22,1],[15,1],[29,45],[29,1],[35,1],[23,2],[21,1],[17,1],[57,1],[20,1],[19,4],[24,1],[18,2],[61,2],[51,12],[41,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,4],[1,7],[1,3],[1,1],[1,15],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,2],[1,2],[1,1],[1,4],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,5],[1,8],[1,1],[1,1],[1,2],[1,2],[1,134],[1,45],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,19],[1,4],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,19],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,5],[1,3],[1,6],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,1],[1,26],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,5],[1,4],[1,1],[1,27],[1,1],[1,1],[1,1],[1,11],[1,2],[1,4],[1,1],[1,1],[1,24],[1,2],[1,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,15],[2,1],[2,1],[2,1],[2,3],[2,1],[2,5],[2,1],[2,4],[2,1],[2,1],[2,5],[2,2],[2,1],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,3],[2,1],[2,2],[2,17],[2,4],[2,2],[2,7],[2,2],[2,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,18],[3,1],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,2],[3,2],[3,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,4],[4,1],[4,20],[4,2],[4,4],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,3],[4,4],[4,2],[4,2],[4,1],[4,1],[5,3],[5,1],[5,1],[6,1],[6,8],[7,1],[7,1],[7,5],[8,21],[8,1],[8,1],[8,2],[9,1],[10,30],[10,2],[10,3],[10,1],[11,1],
[11,2],[11,1],[11,1],[12,1],[12,3],[12,6],[13,1],[13,2],[13,1],[14,1],[14,2],[17,1],[52,1],[64,1],[190,2],[25,3],[19,3],[22,1],[15,2],[25,1],[25,2],[38,1],[69,1],[1,1],[1,4],[1,1],[1,21],[1,1],[1,3],[1,11],[1,31],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,212],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,7],[1,2],[1,5],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,78],[1,3],[1,7],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,8],[2,1],[2,1],[2,5],[2,2],[2,1],[2,6],[2,1],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,30],[2,3],[2,5],[2,4],[2,3],[2,1],[2,1],[3,1],[3,2],[3,1],[3,11],[3,1],[3,1],[3,8],[3,2],[3,1],[3,4],[3,3],[3,2],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,8],[4,1],[4,2],[4,1],[4,2],[4,1],[4,3],[4,1],[4,2],[4,7],[4,1],[4,1],[4,1],[4,1],[4,7],[5,1],[5,1],[5,2],[5,2],[5,1],[5,11],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,2],[5,8],[5,1],[6,2],[6,8],[6,1],[6,1],[6,1],[6,2],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,2],[7,6],[7,2],[8,1],[8,6],[8,15],[9,2],[10,3],[10,1],[10,1],[10,2],[10,5],[10,2],[10,64],[11,1],[11,1],[11,1],[12,1],[12,6],[12,1],[12,2],[14,4],[14,1],[17,1],[21,1],[17,1],[32,1],[16,1],[18,5],[17,1],[16,1],[17,2],[262,1],[22,1],[227,5],[82,4],[28,3],[56,7],[42,2],[26,1],[137,1],[55,19],[29,1],[42,2],[1,5],[1,1],[1,2],[1,22],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,5],[1,7],[1,2],[1,2],[1,1],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,3],[1,16],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,28],[1,6],[1,1],[1,2],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,16],[1,1],[1,2],[1,3],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,4],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[2,5],[2,5],[2,4],[2,2],[2,32],[2,1],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,45],[2,3],[2,11],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,8],[2,2],[2,2],[2,1],[2,2],[2,2],[2,1],[2,7],[2,4],[2,2],[2,4],[2,1],[2,8],[3,1],[3,1],[3,1],[3,3],[3,4],[3,1],[3,10],[3,6],[3,1],[3,1],[3,1],[3,2],[3,4],[3,4],[3,1],[3,1],[3,7],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,19],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,1],[4,2],[4,1],[4,9],[4,4],[4,5],[4,3],[4,2],[4,3],[5,1],[5,2],[5,20],[5,1],[5,2],[5,2],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,1],[6,6],[6,2],[7,1],[7,1],[7,1],[7,4],[8,1],[8,5],[8,14],[9,1],[9,4],[10,1],[10,1],[10,1],[10,1],[11,6],[11,4],
[12,1],[12,2],[13,2],[13,1],[13,6],[14,2],[42,4],[264,3],[22,3],[15,6],[19,1],[46,2],[193,1],[15,1],[127,5],[47,1],[16,2],[27,1],[25,1],[19,5],[73,1],[60,1],[27,1],[19,2],[1,2],[1,1],[1,2],[1,2],[1,4],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,16],[1,2],[1,3],[1,2],[1,1],[1,4],[1,20],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,3],[1,4],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,47],[1,2],[1,2],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,16],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,7],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,14],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,4],[1,5],[1,1],[1,1],[1,1],[1,17],[1,71],[1,1],[1,1],[1,1],[1,79],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,7],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,4],[2,13],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,6],[2,3],[2,1],[2,1],[2,1],[2,2],[2,17],[2,2],[2,2],[2,8],[2,1],[2,3],[2,2],[2,11],[2,1],[2,2],[2,5],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[2,6],[2,25],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,8],[3,5],[3,3],[3,7],[3,1],[3,1],[3,9],[3,6],[3,3],[3,2],[3,8],[3,4],[3,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,1],[4,3],[4,2],[4,1],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[5,1],[5,5],[5,3],[5,2],[5,3],[5,1],[5,3],[6,1],[6,1],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,32],[7,2],[7,1],[7,4],[7,1],[7,1],[7,4],[8,2],[8,2],[8,1],[8,2],[8,1],[9,1],[9,3],[9,1],[9,1],[9,1],[10,3],[11,4],[11,1],[11,1],[11,3],[11,3],[11,1],[12,1],[12,1],[12,1],[13,2],[13,1],[13,2],[14,5],[26,2],[49,1],[26,1],[18,1],[27,1],[15,1],[23,1],[58,3],[36,2],[19,3],[62,2],[72,2],[90,1],[124,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,1],[1,1],[1,18],[1,1],[1,2],[1,4],[1,24],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,3],[1,1303],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,10],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,2],[1,25],[1,2],[1,7],[1,1],[1,1],[1,6],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,6],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[2,1],[2,5],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,2],[2,6],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,3],[2,13],[2,1],[2,2],[2,1],[2,3],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,5],[3,2],[3,2],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,2],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,4],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,1],[5,2],[5,9],[5,2],[5,1],[5,7],[5,2],[5,1],[5,2],[5,2],[5,1],[6,3],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,29],[6,2],[7,3],[7,2],[7,1],[7,1],[7,2],[7,2],[7
,2],[7,3],[7,2],[8,5],[8,1],[8,1],[8,3],[8,2],[8,1],[8,2],[9,1],[9,1],[10,1],[10,14],[10,3],[10,4],[10,3],[10,4],[11,1],[11,5],[11,2],[11,3],[11,1],[11,1],[11,2],[12,1],[12,1],[13,5],[13,1],[13,1],[14,1],[14,3],[14,1],[24,1],[15,1],[19,2],[15,5],[131,1],[28,13],[33,1],[24,1],[17,1],[15,1],[44,2],[16,2],[16,3],[29,7],[29,1],[82,8],[16,1],[17,2],[16,2],[45,1],[159,1],[100,2],[23,1],[15,1],[15,1],[22,1],[48,1],[25,5],[15,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,4],[1,44],[1,1],[1,2],[1,40],[1,1],[1,9],[1,1],[1,17],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,25],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,12],[1,2],[1,2],[1,5],[1,2],[1,3],[1,7],[1,5],[1,72],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,5],[1,3],[1,2],[1,3],[1,382],[1,1],[1,3],[1,1],[1,1],[1,6],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,2],[1,6],[1,1],[1,3],[1,3],[1,1],[1,6],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[2,1],[2,1],[2,1],[2,1],[2,12],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,52],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,9],[2,1],[2,1],[2,18],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[3,6],[3,3],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,4],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,80],[3,1],[3,2],[3,1],[3,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,1],[4,4],[4,4],[4,1],[4,2],[4,2],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,2],[5,1],[6,4],[6,3],[6,1],[6,6],[6,1],[6,1],[7,2],[7,1],[7,1],[7,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,4],[8,1],[8,2],[8,3],[9,2],[9,3],[9,3],[9,6],[10,1],[10,1],[10,1],[10,1],[11,8],[11,1],[11,1],[12,2],[13,5],[15,1],[35,7],[16,1],[24,2],[16,1],[25,1],[65,4],[36,1],[16,5],[21,10],[18,1],[16,12],[29,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,4],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,4],[1,2],[1,7],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,4],[1,8],[1,6],[1,1],[1,4],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,7],[1,2],[1,5],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,5],[1,1],[1,13],[1,3],[1,2],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,3],[1,12],[1,2],[1,2],[1,4],[1,1],[1,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,3],[2,1],[2,1],[2,1],[2,6],[2,1],[2,6],[2,1],[2,2],[2,6],[2,1],[2,10],[2,1],[2,1],[2,4],[2,1],[2,3],[2,3],[2,1],[2,1],[2,3],[2,5],[2,3],[2,10],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,5],[3,34],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,25],[3,1],[3,1],[4,1],[4,6],[4,3],[4,1],[4,6],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[5,4],[5,1],[5,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[6,3],[7,11],[7,1],[7,5],[8,2],[8,1],[8,1],[
9,2],[9,5],[9,4],[9,3],[9,1],[9,2],[9,2],[10,1],[10,2],[11,1],[12,3],[12,1],[13,11],[13,1],[17,1],[201,2],[16,2],[104,4],[123,2],[15,1],[26,5],[74,1],[15,3],[15,7],[16,1],[39,2],[27,1],[32,1],[53,4],[28,1],[25,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,16],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,11],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,4],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,32],[1,2],[1,1],[1,1],[1,6],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,55],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,5],[1,4],[1,7],[1,1],[1,1],[1,6],[1,2],[1,2],[1,6],[1,3],[1,2],[1,1],[1,6],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,2],[1,3],[1,1],[2,1],[2,1],[2,11],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,4],[2,1],[2,2],[2,2],[2,2],[2,3],[2,4],[2,2],[2,5],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,6],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,1],[3,2],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,3],[4,3],[4,1],[4,4],[4,1],[4,2],[4,1],[4,3],[4,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,2],[5,9],[5,1],[5,1],[5,2],[5,1],[5,2],[6,2],[6,3],[6,1],[6,1],[6,2],[6,1],[6,2],[6,2],[6,1],[6,4],[6,2],[7,7],[7,2],[7,4],[7,1],[7,2],[7,19],[7,1],[7,1],[7,1],[8,1],[8,12],[8,1],[8,3],[8,1],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,4],[10,2],[12,3],[12,1],[12,1],[13,1],[13,1],[14,1],[14,1],[14,3],[30,7],[32,1],[40,2],[16,1],[91,6],[122,1],[15,1],[17,1],[20,3],[19,2],[19,1],[98,2],[81,14],[47,4],[38,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,83],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,1],[1,88],[1,2],[1,2],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,57],[1,2],[1,6],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,5],[1,1],[1,1],[1,9],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,5],[1,2],[1,3],[1,1],[1,2],[1,4],[1,4],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,2],[2,2],[2,15],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,7],[2,1],[2,4],[2,3],[2,2],[2,3],[2,1],[2,1],[2,2],[3,4],[3,1],[3,1],[3,2],[3,3],[3,6],[3,2],[3,9],[3,9],[3,2],[3,2],[3,1],[3,15],[3,1],[3,1],[3,1],[3,3],[4,1],[4,1],[4,2],[4,3],[4,1],[4,2],[4,1],[4,6],[4,2],[4,8],[4,9],[4,1],[4,1],[4,1],[5,1],[5,1],[5,78],[5,1],[5,1],[5,1],[5,17],[5,1],[5,3],[5,2],[5,1],[6,1],[6,1],[6,5],[6,19],[6,1],[6,6],[6,1],[6,1],[6,2],[6,1],[6,1],[6,1],[6,2],[6,1],[7,2],[7,1],[7,1],[7,4],[7,1],[7,28],[7,1],[8,1],[8,1],[8,1],[9,3],[9,1],[9,11],[9,4],[10,1],[10,2],[11,1],[11,1],[11,1],[11,1],[12,1],[
14,2],[14,2],[14,2],[18,2],[31,1],[29,2],[16,1],[17,20],[25,1],[20,3],[59,1],[25,1],[27,2],[26,1],[44,1],[17,4],[16,4],[20,6],[67,2],[15,1],[65,1],[17,1],[33,1],[61,2],[1,2],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,5],[1,2],[1,1],[1,1],[1,18],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,56],[1,1],[1,4],[1,1],[1,9],[1,6],[1,9],[1,1],[1,2],[1,1],[1,1],[1,1],[1,18],[1,10],[1,1],[1,5],[1,1],[1,1],[1,2],[1,5],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,8],[1,3],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,2],[1,27],[1,3],[1,1],[1,2],[1,9],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,18],[1,1],[1,2],[1,46],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,7],[1,8],[1,1],[1,3],[1,6],[2,1],[2,1],[2,1],[2,1],[2,5],[2,4],[2,1],[2,2],[2,2],[2,4],[2,2],[2,1],[2,2],[2,1],[2,3],[2,5],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,12],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,3],[2,1],[2,2],[2,1],[2,10],[2,2],[2,8],[2,2],[2,2],[2,1],[2,5],[2,5],[2,4],[2,1],[2,1],[2,1],[2,1],[3,2],[3,6],[3,2],[3,1],[3,58],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,6],[3,10],[3,1],[3,4],[3,1],[3,1],[3,6],[3,1],[3,29],[3,2],[3,2],[3,6],[3,1],[4,1],[4,4],[4,2],[4,1],[4,46],[4,2],[4,1],[4,2],[4,2],[4,3],[4,11],[4,3],[4,1],[4,2],[4,1],[4,15],[4,2],[5,5],[5,9],[5,1],[5,2],[5,136],[5,48],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,10],[6,1],[6,2],[6,1],[7,2],[7,1],[7,3],[7,2],[7,11],[7,6],[7,1],[8,1],[8,3],[8,2],[8,1],[8,12],[8,2],[8,2],[9,1],[9,1],[9,1],[9,4],[10,1],[10,2],[11,2],[12,9],[13,1],[14,2],[21,1],[26,1],[16,2],[2230,1],[29,1],[16,5],[401,3],[33,1],[19,31],[15,4],[28,2],[23,1],[42,4],[40,1],[70,1],[15,3],[15,2],[22,1],[103,1],[256,27],[41,1],[86,1],[17,1],[31,1],[26,1],[105,2],[28,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,6],[1,4],[1,1],[1,4],[1,7],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,2],[1,2],[1,8],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,5],[1,1],[1,29],[1,1],[1,4],[1,2],[1,3],[1,3],[1,17],[1,6],[1,2],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,9],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,4],[1,1],[1,2],[1,6],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,16],[1,5],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,8],[2,3],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,9],[2,1],[2,23],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,25],[2,2],[2,3],[2,2],[2,1],[2,1],[2,3],[2,1],[2,3],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[3,1],[3,2],[3,2],[3,3],[3,2],[3,1],[3,1],[3,5],[3,9],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,1],[3,2],[3,7],[3,3],[3,4],[3,2],[3,1],[3,37],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,305],[4,4],[4,1],[4,1],[4,1],[4,4],[4,3],[4,1],[4,6],[4,7],[4,1],[4,1],[4,1],[4,1],[4,
29],[4,1],[5,10],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,2],[7,1],[7,1],[7,1],[7,2],[8,1],[8,3],[8,2],[9,1],[9,1],[10,1],[10,3],[10,1],[11,6],[11,2],[11,1],[11,1],[12,5],[12,4],[12,1],[14,1],[14,1],[23,1],[26,2],[15,2],[16,16],[31,7],[18,3],[22,3],[87,1],[17,2],[17,9],[30,1],[58,4],[24,2],[28,5],[53,1],[23,1],[28,2],[44,1],[60,3],[17,2],[17,1],[1,1],[1,2],[1,1],[1,11],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,6],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,3],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,15],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,3],[1,15],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,5],[1,3],[1,1],[1,1],[1,14],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,2],[1,3],[1,1],[1,2],[1,9],[1,1],[1,4],[1,1],[1,2],[1,8],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,3],[1,1],[1,1],[1,9],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,2],[1,3],[1,2],[1,6],[1,1],[1,18],[2,1],[2,3],[2,3],[2,1],[2,6],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,3],[2,2],[2,6],[2,1],[2,3],[2,3],[2,1],[2,3],[2,2],[2,2],[2,1],[2,1],[2,9],[2,5],[2,1],[2,1],[2,1],[2,2],[2,85],[2,60],[2,2],[2,1],[2,12],[2,1],[2,1],[2,1],[2,8],[2,1],[2,21],[2,1],[2,3],[2,1],[2,1],[2,8],[2,1],[2,1],[3,3],[3,3],[3,1],[3,3],[3,3],[3,1],[3,2],[3,2],[3,1],[3,1],[3,14],[3,1],[3,6],[3,1],[3,2],[3,1],[3,3],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,2],[4,3],[4,2],[4,1],[4,3],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,4],[5,1],[5,1],[5,1],[5,3],[5,2],[5,1],[5,4],[6,6],[6,1],[6,18],[6,1],[6,1],[6,1],[6,5],[6,2],[6,3],[6,2],[7,3],[7,5],[7,2],[7,1],[7,3],[7,5],[7,1],[7,1],[7,1],[7,1],[8,1],[8,1],[8,3],[8,1],[8,1],[8,4],[9,1],[9,2],[9,4],[10,2],[10,1],[11,2],[11,1],[11,1],[12,3],[13,1],[14,2],[32,7],[26,2],[22,2],[15,1],[26,46],[15,2],[16,1],[19,1],[36,1],[16,2],[24,1],[20,5],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,10],[1,5],[1,13],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,8],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,2],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,8],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,4],[1,3],[1,2],[1,9],[1,19],[1,1],[1,1],[1,1],[1,1],[1,14],[1,3],[1,2],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,11],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,9],[1,2],[1,6],[1,9],[1,3],[1,1],[1,1],[1,5],[1,1],[1,3],[1,2],[1,9],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,4],[1,2],[1,1],[1,3],[1,2],[1,1],[1,12],[1,1],[1,1],[1,1],[1,1],[2,5],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,3],[2,114],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,9],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,3],[2,19],[2,1],[2,8],[2,2],[2,2],[2,7],[2,1],[2,1],[3,2],[3,1],[3,5],[3,3],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,30],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,2],[4,1],[4,3],[4,1],[4,1],[4,7],[4,2],[4,2],[4,3],[4,3],[4,2],
[4,2],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,1],[4,6],[5,2],[5,1],[5,2],[5,1],[5,7],[5,7],[5,1],[5,2],[5,1],[6,1],[6,1],[6,1],[6,2],[6,1],[6,1],[6,4],[6,1],[7,1],[7,1],[7,1],[7,3],[7,1],[7,1],[7,1],[8,1],[8,2],[8,3],[8,1],[8,1],[8,9],[8,6],[9,1],[9,3],[9,4],[10,4],[10,1],[10,3],[10,1],[10,19],[11,3],[11,2],[11,5],[11,5],[11,1],[12,7],[13,3],[13,4],[13,2],[13,4],[14,2],[16,1],[93,1],[22,2],[42,6],[15,1],[16,3],[36,8],[34,1],[30,3],[43,7],[46,8],[40,1],[22,1],[1,3],[1,1],[1,13],[1,2],[1,3],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,13],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,4],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,3],[1,3],[1,2],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,2],[1,2],[1,3],[1,7],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,6],[1,1],[1,2],[1,3],[1,3],[1,1],[1,4],[1,2],[1,7],[1,2],[1,5],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,6],[1,2],[1,2],[1,1],[1,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,12],[2,1],[2,1],[2,3],[2,3],[2,1],[2,2],[2,3],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,1],[2,1],[2,1],[2,7],[2,2],[2,1],[2,18],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,5],[2,1],[2,1],[2,6],[2,3],[2,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[4,6],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[4,2],[4,5],[4,2],[4,2],[4,2],[4,2],[4,1],[4,3],[4,2],[4,1],[5,1],[5,3],[5,2],[5,2],[5,1],[5,1],[5,3],[5,1],[5,1],[5,2],[5,4],[5,4],[5,1],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,4],[6,1],[7,2],[7,1],[7,2],[7,1],[7,1],[7,1],[8,2],[8,2],[8,3],[8,14],[9,5],[9,2],[9,1],[9,1],[10,8],[10,2],[11,1],[11,1],[12,1],[12,1],[12,1],[12,7],[12,3],[48,1],[73,3],[22,2],[19,1],[20,1],[40,2],[15,2],[34,1],[22,5],[31,2],[47,28],[51,1],[19,2],[231,1],[15,3],[18,2],[18,3],[101,5],[65,2],[30,11],[18,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,64],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,2],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,5],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[2,2],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,10],[2,2],[2,1],[2,2],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,6],[2,2],[2,4],[2,9],[2,2],[2,1],[2,3],[2,2],[2,10],[2,3],[2,1],[2,37],[2,2],[2,2],[2,2],[3,9],[3,4],[3,3],[3,2],[3,2],[3,1],[3,19],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,2]
,[3,10],[3,1],[3,1],[3,1],[3,1],[3,3],[3,6],[4,2],[4,5],[4,1],[4,3],[4,10],[4,1],[4,1],[4,1],[4,1],[4,4],[4,5],[4,1],[4,1],[4,2],[5,2],[5,2],[5,1],[5,2],[5,1],[5,3],[5,2],[5,1],[5,1],[6,3],[6,1],[6,1],[6,6],[6,1],[6,3],[7,2],[7,1],[7,1],[7,1],[7,1],[7,1],[8,1],[8,2],[8,1],[8,3],[8,1],[9,1],[9,1],[9,2],[10,3],[10,4],[10,1],[11,1],[12,1],[12,1],[13,1],[13,3],[13,1],[14,1],[35,2],[15,7],[32,1],[80,1],[22,2],[16,1],[25,1],[156,1],[175,2],[460,1],[63,1],[74,3],[121,2],[16,3],[49,5],[29,1],[16,1],[1,5],[1,4],[1,3],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,3],[1,4],[1,12],[1,1],[1,3],[1,1],[1,2],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,12],[1,1],[1,1],[1,3],[1,1],[1,2],[1,38],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,10],[1,3],[1,3],[1,4],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,6],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,9],[1,1],[1,1],[1,4],[1,4],[1,3],[1,3],[1,2],[1,1],[1,6],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,3],[1,1],[1,6],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,8],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[2,1],[2,1],[2,4],[2,7],[2,1],[2,3],[2,2],[2,3],[2,2],[2,10],[2,2],[2,6],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,4],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,10],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,2],[3,5],[3,3],[3,26],[3,1],[3,4],[3,2],[3,5],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,4],[3,2],[4,8],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,5],[4,1],[4,2],[4,2],[4,2],[4,3],[4,2],[5,2],[5,1],[5,2],[5,3],[5,1],[5,1],[5,3],[5,1],[5,1],[5,1],[6,4],[6,2],[6,1],[6,1],[6,7],[6,2],[7,1],[7,1],[7,1],[7,3],[7,3],[7,3],[8,2],[8,1],[8,3],[9,3],[9,2],[9,1],[9,3],[9,2],[10,1],[10,1],[10,4],[11,2],[11,1],[11,1],[12,1],[12,55],[12,1],[13,1],[35,4],[21,9],[26,1],[165,7],[21,1],[55,5],[19,10],[18,5],[17,1],[67,1],[68,4],[19,1],[24,6],[89,3],[21,1],[40,1],[52,2],[16,1],[1,3],[1,4],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,4],[1,1],[1,1],[1,14],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,22],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,5],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,37],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,1],[1,11],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,11],[1,2],[1,1],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,8],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,19],[2,6],[2,3],[2,1],[2,2],[2,3],[2,2],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,7],[2,1],[2,3],[2,3],[2,1],[3,6],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,29],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,15],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,7],[3,3],[3,4],[3,1],[4,2],[4,10],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[5,3],
[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[6,13],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[9,2],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,112],[10,1],[11,1],[11,3],[11,11],[12,1],[13,2],[13,1],[13,2],[14,1],[78,1],[43,1],[20,1],[15,1],[26,5],[17,2],[32,2],[93,2],[57,2],[25,1],[112,4],[18,1],[73,1],[30,55],[24,1],[699,1],[17,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[1,4],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,4],[1,4],[1,1],[1,3],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,13],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,5],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,4],[1,15],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,24],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,5],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,3],[3,13],[3,10],[3,7],[3,1],[3,1],[3,1],[3,9],[3,9],[3,1],[3,2],[3,11],[3,1],[3,4],[3,1],[3,1],[4,2],[4,1],[4,2],[4,1],[4,115],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,2],[4,4],[4,9],[4,1],[4,1],[5,1],[5,2],[5,3],[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[5,1],[5,1],[5,7],[5,1],[5,1],[6,39],[6,2],[6,3],[6,1],[7,1],[7,2],[7,3],[7,1],[7,2],[7,8],[7,1],[8,3],[8,1],[8,1],[8,1],[8,1],[9,3],[9,2],[9,1],[10,3],[10,25],[10,1],[10,1],[11,6],[11,1],[11,1],[11,1],[11,7],[12,1],[12,1],[12,1],[13,1],[13,1],[14,8],[14,1],[14,1],[74,2],[26,11],[69,1],[108,1],[20,5],[1263,1],[21,1],[16,1],[16,3],[32,2],[62,2],[50,1],[16,1],[15,1],[22,5],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,5],[1,10],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,9],[1,7],[1,9],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,42],[1,12],[1,3],[1,3],[1,5],[1,2],[1,1],[1,5],[1,4],[1,3],[1,3],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,12],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,5],[1,1],[1,16],[1,1],[1,7],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[2,1],[2,3],[2,1],[2,1],[2,9],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,10],[2,2],[2,1],[2,4],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[3,1],[3,3],[3,135],[3,1],[3,10],[3,1],[3,1],[3,3],[3,2],[3,2],[3,2],[3,5],[3,1],[3,2],[3,7],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[4,91],[4,2],[4,2],[4,3],[4,10],[4,3],[4,2],[4,3],[4,1],[4,1],[4,32],[4,2],[4,2],[5,1],[5,1],[5,3],[5,1],[5,3],[5,2],[5,1],[5,34],[5,2],[5,7],[5,2],[5,1],[6,2],[6,1],[6,5],[6,2],[6,1],[6,1],[7,2],[7,2],[7,1],[7,1],[7,6],[7,1],[8,1],[8,2],[8,1],[8,5],[8,4],[8,1],[8,3],[8,1],[9,4],[9,7],[9,1],[11,2],[11,2]
,[11,1],[11,1],[11,2],[11,19],[11,6],[12,6],[13,2],[13,1],[13,1],[14,1],[76,1],[65,1],[15,2],[19,1],[15,1],[32,1],[33,1],[19,4],[27,3],[62,7],[36,2],[39,3],[44,3],[17,1],[940,4],[20,1],[16,5],[17,4],[21,1],[46,1],[55,1],[251,12],[27,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,8],[1,1],[1,1],[1,5],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,9],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,3],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,32],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,11],[1,4],[1,15],[1,3],[1,2],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,11],[1,9],[1,1],[1,2],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,128],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,1],[1,1],[1,3],[1,8],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[3,1],[3,2],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,1],[4,2],[4,2],[4,1],[4,1],[5,33],[5,5],[5,2],[5,1],[5,5],[5,48],[6,2],[6,3],[6,2],[6,1],[6,1],[6,2],[6,3],[6,1],[6,3],[7,8],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[8,1],[8,2],[8,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,1],[11,2],[11,5],[12,1],[12,2],[12,2],[17,4],[17,1],[15,2],[29,5],[38,1],[20,1],[16,2],[24,1],[42,1],[29,1],[60,2],[20,1],[168,4],[17,33],[83,2],[71,1],[16,1],[18,3],[54,1],[15,8],[22,1],[36,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,7],[1,5],[1,1],[1,9],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,3],[1,2],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,2],[1,1],[1,143],[1,1],[1,1],[1,2],[1,4],[1,4],[1,2],[1,2],[1,96],[1,1],[1,4],[1,16],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,8],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,6],[1,1],[1,15],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,6],[1,5],[1,6],[1,1],[1,1],[1,1303],[1,2],[1,2],[1,1],[1,5],[1,2],[1,2],[1,12],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,3],[1,8],[2,1],[2,1],[2,2],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,14],[2,1],[2,1],[2,1],[2,5],[2,1],[2,7],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,33],[2,1],[2,1],[2,1],[2,2],[2,3],[2,5],[2,1],[2,2],[2,8],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,16],[3,1],[3,4],[3,1],[3,1],[3,8],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,2],[3,5],[3,6],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,4],[4,2],[4,3],[4,1],[4,2],[4,2],[4,3],[4,1],[4,1],[4,1],[4,1],[4,45],[5,2],[5,1],[5,4],[5,2],[5,1],[5,1],[5,1],[5,1],[5,3],[5,1],[5,3],[6,5],[6,13],[6,4],[6,1],[6,2],[6,1],[6,2],[7,3],[7,1],[7,2],[7,1],[7,1],[8,1],[8,1],[8,1],[8,11],[8,4],[8,1],[8,1],[9,2],[9,1],[10,1],[10,1],[10,2],[11,25],[11,1],[11,1],[11,7],
[11,1],[12,3],[12,1],[12,1],[26,3],[29,11],[18,1],[20,1],[15,1],[16,1],[35,4],[15,1],[63,2],[39,1],[64,4],[15,1],[15,1],[26,1],[64,1],[40,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,12],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,1],[1,16],[1,1],[1,2],[1,47],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,170],[1,2],[1,2],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,14],[1,35],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,15],[1,13],[1,2],[1,1],[1,1],[1,8],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,53],[1,1],[1,4],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,14],[2,3],[2,1],[2,2],[2,3],[2,9],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,8],[2,3],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,3],[2,1],[2,1],[2,4],[2,2],[2,161],[2,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,51],[3,1],[3,1],[3,3],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,3],[3,4],[3,2],[3,2],[3,1],[3,1],[3,10],[3,1],[4,1],[4,1],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,5],[4,9],[4,1],[4,3],[4,1],[5,4],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,1],[5,1],[6,7],[6,1],[6,1],[6,1],[6,1],[6,1],[6,3],[6,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,2],[8,2],[9,1],[9,1],[10,3],[10,1],[10,1],[10,3],[11,9],[11,1],[11,1],[11,1],[11,1],[11,2],[11,2],[12,1],[12,4],[13,2],[13,2],[13,15],[14,1],[14,1],[17,3],[185,1],[51,1],[21,3],[19,3],[17,1],[29,1],[38,4],[169,24],[41,4],[15,1],[59,5],[87,3],[169,1],[29,5],[28,1],[25,4],[48,1],[15,3],[18,1],[22,2],[36,4],[134,1],[19,1],[15,1],[17,3],[56,1],[24,1],[17,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,3],[1,6],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,79],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,3],[1,3],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,5],[1,4],[1,1],[1,2],[1,5],[1,2],[1,1],[1,10],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,24],[1,2],[1,1],[1,11],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,4],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,31],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,13],[1,5],[1,3],[1,2],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,3],[1,1],[1,2],[1,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,5],[2,2],[2,8],[2,1],[2,1],[2,1],[2,3],[2,13],[2,6],[2,1],[2,4],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,4],[2,6],[2,1],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[2,4],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,6],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,13],[3,3],[3,1],[3,2],[3,2],[3,1],[4,4],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[5,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,1],[5,2],[6,8],[7,1],[7,1],[7,2],[8,2],[8,2],[8,2],[8,3],[8,3],[8,1],[8,1],[9,1],[9,1],[10,1],[10,3],[10,1],[12,3],[12,2],[12,2],[12,1],[12,1],[12,1],[13,3],[13,1],[13,1],[14,1],[17,1]
,[25,7],[15,6],[111,8],[92,1],[26,21],[328,1],[16,1],[752,1],[16,1],[22,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,3],[1,6],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,5],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,1],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,2],[1,3],[1,2],[1,2],[1,3],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,8],[1,5],[1,1],[1,2],[1,4],[1,21],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[2,5],[2,1],[2,1],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,8],[2,1],[2,2],[2,12],[2,2],[2,2],[2,1],[2,5],[2,2],[2,2],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,9],[2,1],[2,1],[3,3],[3,1],[3,1],[3,5],[3,1],[3,2],[3,3],[3,1],[3,12],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,1],[3,1],[3,7],[4,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,3],[5,1],[5,2],[5,1],[5,1],[5,1],[5,1],[6,1],[6,5],[6,11],[6,1],[6,1],[6,2],[6,1],[6,4],[6,1],[6,1],[7,5],[7,1],[7,1],[8,1],[8,3],[9,2],[9,1],[10,1],[11,1],[11,1],[11,2],[11,1],[12,4],[12,2],[13,1],[13,1],[13,2],[14,6],[14,1],[68,4],[113,4],[22,1],[48,79],[28,2],[88,1],[232,2],[23,1],[32,1],[72,2],[26,1],[20,1],[53,1],[16,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,6],[1,1],[1,3],[1,1],[1,3],[1,4],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,9],[1,6],[1,5],[1,1],[1,1],[1,3],[1,2],[1,9],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,16],[1,3],[1,1],[1,86],[1,1],[1,2],[1,4],[1,2],[1,16],[1,9],[1,4],[1,2],[1,9],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,10],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[1,2],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[2,6],[2,3],[2,2],[2,1],[2,3],[2,2],[2,2],[2,2],[2,6],[2,1],[2,4],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,2],[2,1],[2,2],[2,9],[2,10],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,3],[2,1],[3,1],[3,1],[3,1],[3,2],[3,7],[3,5],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,5],[3,2],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,2],[5,5],[5,2],[5,9],[5,5],[5,1],[5,2],[5,1],[5,2],[6,7],[6,7],[7,3],[7,8],[7,1],[7,1],[7,2],[7,7],[8,1],[8,1],[8,1],[9,6],[9,4],[10,2],[10,1],[10,1],[10,3],[10,2],[11,1],[12,5],[12,3],[12,1],[13,1],[14,2],[14,3],[14,4],[30,1],[19,1],[27,1],[24,12],[20,24],[20,1],[80,1],[26,1],[25,1],[35,1],[150,1],[22,1],[28,1],[187,2],[15,2],[21,1],[22,1],[17,8],[27,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,4],[1,1],[1,3],[1,5],[1,1],[1,10],[1,8],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1
,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,7],[1,3],[1,1],[1,10],[1,1],[1,4],[1,1],[1,1],[1,2],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,1],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,1],[1,6],[1,2],[1,1],[1,28],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,9],[2,1],[2,1],[2,7],[2,3],[2,1],[2,1],[2,3],[2,4],[2,2],[2,2],[2,2],[2,1],[2,3],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[3,10],[3,1],[3,3],[3,4],[3,4],[3,398],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,4],[3,3],[3,2],[3,1],[4,2],[4,16],[4,3],[4,2],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,21],[4,5],[4,1],[4,3],[4,2],[4,2],[4,1],[4,2],[4,1],[4,2],[5,3],[5,1],[5,3],[5,1],[5,5],[5,7],[5,1],[5,1],[5,1],[5,7],[5,4],[5,6],[5,1],[6,1],[6,2],[6,3],[6,2],[6,1],[6,3],[7,8],[7,6],[7,1],[7,2],[7,1],[7,1],[8,4],[8,1],[8,4],[8,1],[8,1],[8,8],[8,3],[9,1],[9,1],[9,2],[10,6],[11,1],[11,1],[11,1],[12,1],[12,4],[12,6],[13,3],[13,1],[520,3],[292,13],[16,1],[20,1],[44,3],[22,1],[17,2],[18,1],[46,5],[19,1],[15,3],[28,1],[23,1],[19,13],[25,2],[23,134],[68,1],[79,13],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,12],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,36],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,22],[1,1],[1,1],[1,1],[1,187],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,2],[1,1],[1,20],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,6],[2,6],[2,9],[2,1],[2,2],[2,1],[2,2],[2,2],[2,3],[2,6],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,44],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[3,9],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,4],[3,2],[3,1],[3,1],[3,21],[3,6],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,1],[3,3],[3,5],[3,1],[3,1],[3,5],[3,1],[3,2],[3,2],[3,1],[3,1],[3,1],[4,92],[4,1],[4,1],[4,1],[4,13],[4,4],[4,1],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[6,1],[6,3],[6,2],[6,23],[6,2],[6,3],[6,35],[7,1],[7,1],[7,1],[8,690],[8,1],[8,3],[9,2],[9,5],[9,1],[10,4],[11,6],[12,4],[12,1],[14,15],[14,1],[18,1],[46,1],[16,1],[24,4],[27,2],[21,1],[98,1],[107,3],[44,16],[16,1],[28,1],[1,1],[1,2],[1,7],[1,3],[1,1],[1,1],[1,2],[1,2],[1,14],[1,1],[1,1],[1,1],[1,36],[1,1],[1,3],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,13],[1,51],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,6],[1,2],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,94],[1,6],[1,1],[1,1],[1,1],[1,2],[1,4],[1,5],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1
,1],[1,1],[1,5],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,1],[1,28],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,10],[1,4],[1,4],[1,2],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,5],[1,7],[2,1],[2,5],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,7],[2,7],[2,2],[2,4],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,1],[3,5],[3,5],[3,1],[3,1],[3,10],[3,30],[3,1],[3,1],[3,1],[3,3],[3,1],[3,4],[3,3],[3,3],[3,1],[3,1],[3,2],[3,1],[3,92],[3,1],[4,4],[4,1],[4,2],[4,5],[4,1],[4,2],[4,2],[4,1],[4,4],[4,1],[4,1],[4,1],[5,1],[5,2],[5,1],[5,1],[5,1],[5,4],[5,2],[5,1],[5,10],[6,2],[6,1],[6,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,5],[8,1],[8,1],[8,5],[8,5],[8,1],[9,2],[9,1],[9,4],[9,4],[10,1],[10,1],[10,5],[10,5],[10,1],[10,1],[11,1],[11,1],[11,1],[11,2],[12,1],[12,2],[12,2],[12,1],[13,1],[13,1],[13,3],[14,1],[14,22],[14,1],[14,1],[14,2],[20,4],[27,1],[18,2],[49,1],[16,3],[15,1],[18,1],[15,1],[18,1],[15,1],[27,2],[21,1],[23,1],[54,1],[22,1],[46,1],[17,1],[37,7],[17,1],[19,1],[33,2],[62,1],[18,4],[18,1],[24,1],[18,1],[36,1],[20,1],[125,1],[18,13],[36,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,3],[1,8],[1,2],[1,4],[1,10],[1,1],[1,71],[1,1],[1,2],[1,18],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,34],[1,9],[1,2],[1,7],[1,3],[1,3],[1,3],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,8],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,6],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,6],[1,1],[1,10],[1,1],[1,10],[1,1],[1,2],[1,2],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,2],[1,20],[1,2],[1,3],[1,2],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,10],[2,1],[2,1],[2,6],[2,3],[2,5],[2,3],[2,1],[2,1],[2,11],[2,2],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,1],[2,3],[2,2],[2,1],[2,6],[2,3],[2,1],[2,1],[2,1],[3,4],[3,2],[3,1],[3,8],[3,1],[3,49],[3,2],[3,2],[3,3],[3,1],[3,2],[3,5],[3,3],[3,2],[3,1],[3,3],[3,1],[3,2],[3,13],[3,7],[3,2],[3,1],[4,2],[4,4],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[5,1],[5,4],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[6,1],[6,7],[6,1],[6,1],[6,4],[6,2],[6,3],[6,1],[6,9],[7,1],[7,1],[8,3],[8,7],[8,1],[8,2],[8,2],[8,2],[8,8],[8,1],[9,1],[9,1],[9,1],[9,2],[10,1],[11,3],[12,1],[12,1],[12,2],[12,1],[12,3],[13,1],[14,1],[58,1],[21,1],[36,15],[218,1],[34,1],[20,2],[16,2],[28,1],[38,1],[38,3],[16,1],[165,2],[132,1],[19,2],[260,1],[39,2],[64,1],[18,1],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,13],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,3],[1,2],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,6],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3]
,[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,63],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,9],[1,2],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,8],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,15],[1,6],[1,1],[1,1],[1,422],[1,2],[1,2],[1,4],[1,2],[1,2],[1,3],[1,2],[1,3],[1,1],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[2,4],[2,3],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,2],[2,13],[2,11],[2,4],[2,1],[2,2],[2,10],[2,5],[2,2],[2,75],[2,3],[2,1],[2,8],[2,4],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,14],[2,2],[2,15],[2,1],[2,2],[2,4],[2,1],[2,1],[2,2],[2,33],[2,2],[2,1],[2,1],[2,3],[2,2],[2,2],[2,1],[3,1],[3,13],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,6],[3,7],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,3],[3,2],[3,1],[3,6],[3,2],[3,4],[3,2],[4,4],[4,4],[4,4],[4,4],[4,6],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,5],[4,1],[5,4],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[5,1],[5,1],[5,3],[6,1],[6,3],[6,2],[6,4],[6,1],[6,3],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,3],[8,1],[8,1],[8,1],[8,7],[9,2],[10,2],[10,1],[10,6],[11,1],[11,3],[11,2],[12,1],[12,1],[14,2],[14,6],[17,2],[19,1],[15,1],[112,1],[16,1],[30,6],[19,3],[15,4],[19,2],[25,1],[17,4],[49,1],[48,1],[26,1],[17,9],[43,3],[51,6],[17,1],[21,3],[26,4],[31,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,9],[1,1],[1,753],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,4],[1,3],[1,4],[1,1],[1,2],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,26],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,8],[1,10],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,2],[1,6],[1,1],[1,1],[1,15],[1,2],[2,1],[2,12],[2,1],[2,8],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,20],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,14],[2,2],[2,1],[2,5],[2,5],[2,1],[2,2],[2,2],[2,6],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,3],[3,3],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,1],[3,3],[3,12],[3,1],[3,1],[3,1],[3,1],[3,6],[3,1],[3,2],[3,1],[3,1],[4,5],[4,1],[4,5],[4,5],[4,29],[4,11],[4,1],[4,1],[4,2],[4,1],[4,1],[5,2],[5,4],[5,1],[5,6],[5,1],[5,1],[5,1],[5,1],[6,1],[6,4],[6,1],[6,4],[6,2],[6,2],[6,1],[6,1],[6,2],[6,1],[7,1],[7,2],[7,1],[7,1],[7,2],[8,3],[8,4],[8,5],[8,7],[8,5],[9,5],[9,1],[9,1],[10,2],[10,2],[10,4],[11,1],[11,1],[12,8],[12,1],[12,1],[13,1],[13,1],[13,2],[14,2],[20,4],[18,3],[65,1],[23,1],[20,3],[237,1],[70,5],[80,2],[71,1],[15,4],[18,8],[54,1],[30,1],[15,2],[26,2],[20,1],[17,1],[26,4],[20,13],[1,2],[1,1],[1,3],[1,1],[1,3],[1,5],[1,3],[1,1],[1,5],[1,1],[1,3],[1,7],[1,2],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,11],[1,1],[1,6],[1,4],[1,3],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,7],[1
,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,1],[1,10],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,8],[1,1],[1,1],[1,2],[1,4],[1,1],[1,34],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,7],[1,4],[1,7],[1,7],[1,1],[1,3],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,14],[1,6],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[2,2],[2,1],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[3,3],[3,7],[3,4],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[3,14],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,25],[3,1],[3,1],[4,1],[4,9],[4,1],[4,3],[4,1],[4,1],[4,12],[4,1],[4,3],[4,7],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,5],[5,2],[5,1],[5,1],[5,2],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,5],[6,1],[6,3],[6,1],[6,4],[6,1],[6,1],[6,3],[6,2],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[8,2],[8,1],[8,1],[8,1],[8,1],[9,2],[10,374],[10,3],[11,1],[11,1],[11,3],[11,8],[11,4],[12,1],[13,3],[13,2],[13,4],[58,1],[43,1],[38,1],[196,1],[55,3],[15,1],[79,1],[16,5],[20,1],[32,1],[111,1],[68,1],[50,17],[327,47],[46,3],[24,3],[41,2],[65,1],[1,2],[1,14],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,4],[1,5],[1,8],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,1],[1,5],[1,1],[1,3],[1,29],[1,4],[1,2],[1,1],[1,1],[1,4],[1,2],[1,9],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,8],[1,2],[1,13],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,4],[1,6],[1,1],[1,1],[1,3],[1,2],[1,4],[1,2],[1,10],[1,2],[1,2],[1,2],[1,1],[1,4],[1,2],[1,1],[1,5],[1,93],[1,1],[1,1],[1,3],[1,22],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,2],[1,8],[1,3],[1,1],[1,5],[1,6],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,28],[1,1],[1,6],[1,6],[1,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,2],[2,6],[2,2],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,2],[2,6],[2,3],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,14],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,9],[2,2],[2,1],[2,5],[2,1],[2,1],[2,3],[2,2],[2,2],[2,7],[2,16],[2,6],[2,2],[2,2],[2,1],[2,2],[3,1],[3,26],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,4],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,12],[3,2],[3,2],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[4,1],[4,8],[4,3],[4,1],[4,4],[5,2],[5,2],[5,1],[5,1],[5,1],[5,9],[6,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,10],[6,1],[7,1],[7,11],[7,4],[7,1],[7,2],[8,2],[8,1],[8,1],[8,1],[8,1],[8,4],[8,7],[9,1],[9,1],[10,2],[10,4],[10,1],[10,1],[11,6],[12,1],[12,1],[12,6],[13,1],[13,5],[13,2],[13,11],[14,8],[14,3],[16,1],[55,1],[17,1],[91,1],[27,1],[16,1],[17,1],[37,1],[54,3],[73,2],[50,1],[19,3],[20,2],[26,1],[55,3],[54,1],[31,1],[68,2],[75,8],[412,1],[21,2],[1,6],[1,1],[1,2],[1,2],[1,4],[1,4],[1,2],[1,6],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,2],[1,3],[1,12],[1,16],[1,3],[1,1],[1,1],[1,3],[1,3],[1,502],[1,3],[1,1],[1,1],[1,5],[1,2],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,5],[1,1],[1,6],[1,4],[1,1]
,[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,17],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,6],[1,1],[1,1],[1,11],[1,1],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,9],[2,2],[2,1],[2,9],[2,1],[2,2],[2,2],[2,2],[2,5],[2,5],[2,2],[2,1],[2,2],[2,1],[2,1],[2,13],[2,5],[2,2],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,5],[2,3],[2,3],[2,10],[2,2],[2,2],[2,2],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[3,2],[3,2],[3,1],[3,7],[3,2],[3,2],[3,1],[3,5],[3,2],[3,3],[3,1],[3,8],[3,1],[3,1],[3,2],[3,14],[3,2],[4,2],[4,1],[4,2],[4,3],[4,2],[4,7],[4,1],[4,5],[4,1],[4,3],[4,10],[4,1],[4,2],[4,4],[4,4],[4,1],[5,1],[5,4],[5,2],[5,1],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[6,15],[6,39],[6,3],[7,2],[7,1],[7,3],[7,1],[7,1],[8,1],[8,1],[9,2],[9,2],[9,1],[9,1],[10,1],[10,1],[10,1],[11,14],[11,1],[11,3],[11,1],[12,1],[12,1],[13,2],[13,2],[14,8],[16,1],[27,1],[21,5],[18,2],[36,1],[36,3],[28,15],[17,13],[18,7],[17,9],[28,2],[19,2],[27,1],[33,11],[40,2],[17,3],[120,2],[136,4],[21,1],[64,1],[23,3],[81,4],[27,1],[126,15],[17,1],[37,2],[21,1],[22,1],[58,1],[1,85],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,9],[1,2],[1,3],[1,7],[1,3],[1,2],[1,5],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,13],[1,74],[1,14],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,3],[1,2],[1,79],[1,1],[1,1],[1,6],[1,1],[1,2],[1,7],[1,2],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,4],[1,4],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,6],[1,1],[1,8],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,5],[1,1],[1,4],[1,3],[1,8],[1,4],[1,1],[1,9],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3],[1,8],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[2,6],[2,1],[2,3],[2,1],[2,3],[2,7],[2,6],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,4],[2,3],[2,2],[2,1],[2,6],[2,1],[2,3],[2,2],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,4],[2,5],[2,1],[2,1],[3,1],[3,57],[3,2],[3,1],[3,1],[3,2],[3,3],[3,15],[3,4],[3,1],[3,1],[3,9],[3,10],[3,5],[3,1],[3,4],[3,4],[3,1],[3,1],[3,6],[3,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,14],[4,3],[4,1],[4,1],[4,3],[4,10],[4,1],[4,2],[5,10],[5,1],[5,1],[5,3],[5,1],[5,5],[5,1],[6,5],[6,4],[6,2],[6,2],[6,3],[6,1],[7,1],[7,1],[7,4],[7,1],[7,2],[7,2],[7,2],[7,2],[8,2],[8,1],[8,4],[8,2],[8,4],[8,1],[9,1],[9,1],[10,3],[10,1],[11,1],[11,1],[12,9],[12,4],[12,2],[13,7],[13,4],[13,2],[13,7],[13,1],[14,1],[14,1],[23,1],[19,2],[16,1],[36,4],[15,4],[22,3],[17,1],[17,2],[38,2],[15,1],[34,1],[29,2],[20,7],[23,4],[44,5],[22,2],[18,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,9],[1,1],[1,4],[1,2],[1,2],[1,1],[1,5],[1,1],[1,2],[1,1],[1,4],[1,2],[1,2],[1,1],[1,3],[1,3],[1,3],[1,2],[1,3],[1,1],[1,2],[1,5],[1,3],[1,1],[1,4],[1,1],[1,6],[1,4],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,8],[1,1],[1,
2],[1,5],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,10],[1,3],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,43],[1,23],[1,2],[1,4],[1,33],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,7],[1,2],[1,4],[1,6],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,136],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,20],[2,1],[2,1],[2,16],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,2],[2,114],[2,1],[2,3],[2,4],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,6],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,2],[2,4],[2,3],[2,2],[2,1],[3,2],[3,1],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,8],[3,2],[3,1],[3,2],[3,28],[3,1],[3,118],[3,1],[3,1],[3,2],[3,2],[3,3],[3,8],[3,3],[4,1],[4,2],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[5,2],[5,1],[5,6],[5,1],[5,4],[5,2],[5,4],[5,1],[5,4],[6,4],[6,1],[6,3],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,46],[7,2],[7,1],[8,3],[8,6],[8,1],[8,5],[9,12],[9,1],[9,5],[10,3],[10,3],[11,3],[11,7],[12,3],[12,1],[12,1],[13,1],[13,1],[13,2],[13,13],[13,1],[14,1],[14,1],[58,2],[112,1],[18,3],[19,1],[20,1],[18,1],[15,2],[92,1],[50,1],[40,1],[57,5],[19,2],[19,1],[15,4],[16,5],[54,1],[15,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,6],[1,7],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,3],[1,6],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,12],[1,1],[1,1],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,2],[1,8],[1,2],[1,1],[1,1],[1,2],[1,1],[1,19],[1,1],[1,1],[1,4],[1,1],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,3],[1,9],[1,26],[1,3],[1,17],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,8],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,30],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,2],[2,3],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,10],[2,4],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,7],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,29],[3,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[4,1],[5,2],[5,1],[5,1],[5,4],[5,1],[5,1],[5,2],[5,1],[5,1],[5,3],[6,4],[6,1],[6,1],[6,3],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[7,2],[7,3],[7,2],[7,1],[7,2],[8,1],[8,1],[8,4],[8,1],[8,3],[9,1],[9,5],[9,1],[9,1],[9,1],[11,1],[11,2],[11,2],[11,3],[12,7],[12,1],[13,1],[14,2],[16,1],[78,3],[17,3],[27,3],[19,2],[67,3],[16,3],[58,3],[17,1],[29,2],[29,1],[23,1],[390,2],[75,2],[26,8],[20,3],[19,2],[16,4],[33,1],[66,2],[20,1],[17,5],[1,1],[1,2],[1,1],[1,1],[1,9],[1,4],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,4],[1,5],[1,11],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1
,3],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,8],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,6],[1,2],[1,1],[1,11],[1,3],[1,1],[1,2],[1,4],[1,4],[1,1],[1,11],[1,7],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,6],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,7],[1,5],[1,2],[1,7],[1,7],[1,1],[1,3],[1,2],[1,4],[1,4],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,3],[1,1],[1,124],[1,2],[1,6],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,5],[2,21],[2,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,31],[2,1],[2,2],[2,4],[2,1],[2,3],[2,125],[2,1],[2,8],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,8],[2,1],[2,12],[2,278],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[4,2],[4,8],[4,1],[4,3],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[5,1],[5,1],[5,1],[5,2],[5,2],[5,2],[5,1],[6,2],[6,2],[6,24],[6,2],[6,2],[6,20],[6,1],[6,1],[6,3],[6,1],[6,4],[6,5],[6,3],[7,2],[7,1],[7,4],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,134],[8,1],[8,1],[8,5],[8,1],[8,6],[9,3],[9,15],[10,4],[10,3],[10,1],[11,12],[11,2],[12,2],[12,2],[14,1],[14,6],[15,3],[30,2],[35,1],[28,1],[111,1],[22,1],[25,1],[18,1],[40,4],[58,1],[295,4],[18,3],[35,1],[16,1],[1,1],[1,1],[1,2],[1,1],[1,6],[1,6],[1,2],[1,1],[1,301],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,5],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,17],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,23],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,15],[1,4],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,2],[2,7],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,6],[2,1],[2,1],[2,46],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,4],[2,3],[3,11],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,2],[3,2],[3,2],[3,1],[3,3],[3,1],[3,2],[3,2],[3,4],[3,1],[3,45],[3,2],[4,11],[4,2],[4,1],[4,2],[4,4],[4,14],[4,4],[4,2],[4,2],[4,1],[5,3],[5,1],[5,1],[5,2],[5,1],[5,2],[5,3],[5,2],[5,1],[5,2],[5,2],[6,1],[6,1],[6,3],[6,2],[6,1],[6,3],[6,1],[6,6],[7,1],[7,2],[7,1],[8,1],[8,2],[8,1],[8,1],[8,1],[8,2],[8,2],[8,2],[9,5],[9,2],[10,1],[10,1],[10,3],[11,8],[11,1],[12,5],[12,1],[14,1]])\n \n ida.scatter_plot(data, '{0}/faithful_ida_scatter.png'.format(output_dir))\n ida.histogram(data, '{0}/faithful_ida_hist.png'.format(output_dir))\n ida.linear_regression(data, '{0}/faithful_ida_regression.png'.format(output_dir))\n\n #clustering\n km2 = __run_clustering(data, output_dir)\n\n #expectation-maximization\n __run_em(data, output_dir, km2)\n\n #build bayes fmm model\n __run_bayesfmm(data, iterations, 
save_diagnostics, output_dir, burnin, km2)",
"def test_roundtrip():\n # Based heavily on test_sbinterpolatedimage() in test_SBProfile.py!\n import time\n t1 = time.time()\n\n # for each type, try to make an SBInterpolatedImage, and check that when we draw an image from\n # that SBInterpolatedImage that it is the same as the original\n ftypes = [np.float32, np.float64]\n ref_array = np.array([\n [0.01, 0.08, 0.07, 0.02],\n [0.13, 0.38, 0.52, 0.06],\n [0.09, 0.41, 0.44, 0.09],\n [0.04, 0.11, 0.10, 0.01] ]) \n\n for array_type in ftypes:\n image_in = galsim.ImageView[array_type](ref_array.astype(array_type))\n np.testing.assert_array_equal(\n ref_array.astype(array_type),image_in.array,\n err_msg=\"Array from input Image differs from reference array for type %s\"%\n array_type)\n interp = galsim.InterpolatedImage(image_in, dx=test_dx)\n test_array = np.zeros(ref_array.shape, dtype=array_type)\n image_out = galsim.ImageView[array_type](test_array)\n image_out.setScale(test_dx)\n interp.draw(image_out)\n np.testing.assert_array_equal(\n ref_array.astype(array_type),image_out.array,\n err_msg=\"Array from output Image differs from reference array for type %s\"%\n array_type)\n \n # Lanczos doesn't quite get the flux right. Wrong at the 5th decimal place.\n # Gary says that's expected -- Lanczos isn't technically flux conserving. \n # He applied the 1st order correction to the flux, but expect to be wrong at around\n # the 10^-5 level.\n # Anyway, Quintic seems to be accurate enough.\n quint = galsim.Quintic(1.e-4)\n quint_2d = galsim.InterpolantXY(quint)\n interp = galsim.InterpolatedImage(image_in, interpolant=quint_2d, dx=test_dx, flux=1.)\n do_shoot(interp,image_out,\"InterpolatedImage\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)",
"def interpolate(self, image):\n return",
"def main():\n time_start = perf_counter()\n\n args = parse_args(sys.argv[1:]).ordered()\n _, opts = next(args)\n log_level = 0\n try:\n log_level = (0, 20, 10)[opts.verbosity]\n mpl_log_level = log_level + 10 if log_level > 0 else log_level\n except IndexError:\n log_level = 10\n mpl_log_level = log_level\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n # set level for all loggers\n # separate log level for matplotlib because it's so verbose\n for logger in loggers:\n if logger.name.startswith(\"matplotlib\"):\n logger.setLevel(mpl_log_level)\n else:\n logger.setLevel(log_level)\n\n LOG.debug(\"Program opts:\\n%s\", pformat(vars(opts)))\n\n # main vars\n inputs = []\n processed = []\n # im: Optional[Image.Image] = None\n im: Image.Image | np.ndarray | None = None\n in_file_path: Optional[str]\n in_image_size = Size(0, 0)\n in_file_size = 0\n in_dpi = 0\n in_exif: Optional[dict] = None\n out_exif: bytes = b\"\"\n out_exif_size = 0\n out_file_path = None\n out_image_size = Size(0, 0)\n out_file_size = 0\n no_op = False\n\n for cmd, arg in args:\n LOG.debug(\"Processing command %s with args:\\n%s\", cmd, pformat(vars(arg)))\n\n if cmd == \"open\":\n in_file_path = arg.input.name\n in_file_size = os.path.getsize(in_file_path) # type: ignore\n im = Image.open(arg.input)\n in_image_size = Size(*im.size)\n LOG.info(\"Input dims: %s\", in_image_size)\n try:\n in_exif = piexif.load(in_file_path)\n del in_exif[\"thumbnail\"]\n # LOG.debug(\"Exif: %s\", in_exif)\n in_dpi = im.info[\"dpi\"]\n except KeyError:\n pass\n LOG.info(\"Input file size: %s\", humanize_bytes(in_file_size))\n LOG.info(\"Input dpi: %s\", in_dpi)\n if arg.show_histogram:\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"open2\":\n # Test of opening multiple images for some operations, such as matting\n for item in arg.input:\n _im = Image.open(item)\n try:\n ex = piexif.load(item.name)\n dpi = _im.info[\"dpi\"]\n del ex[\"thumbnail\"]\n except KeyError:\n ex = None\n dpi = (0, 0)\n _im = np.asarray(_im)\n _im = cv2.cvtColor(_im, cv2.COLOR_RGB2BGR)\n inputs.append(\n Img(\n _im,\n file_path=item.name,\n dpi=dpi,\n exif=ex,\n )\n )\n LOG.debug(\"Imgs: %s\", inputs)\n im = inputs[0].data\n in_file_path = inputs[0].file_path\n in_file_size = inputs[0].file_size\n in_image_size = inputs[0].size\n if arg.show_histogram:\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"mat\":\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n im = mat.create_mat(im, size_inches=arg.size)\n out_image_size = Size.from_np(im)\n elif cmd == \"resize\":\n im = Image.fromarray(im) if type(im) == np.ndarray else im\n if is_ndarray(im) or im is None:\n raise TypeError('Expected Image, not ndarray')\n orig_size = Size(*im.size) # type: ignore\n out_image_size = orig_size\n try:\n resize_method, new_size = resize.get_method(\n orig_size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n else:\n # Resize/resample\n try:\n im = resize.resize(\n resize_method,\n im,\n 
new_size,\n )\n except ImageTooSmallError as e:\n LOG.warning(e)\n out_image_size = Size(*im.size) # type: ignore\n elif cmd == \"resize2\":\n for item in inputs:\n try:\n resize_method, new_size = resize.get_method(\n item.size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n force=arg.force,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n except ResizeAttributeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n else:\n try:\n _im = resize.resize_opencv(\n resize_method, item.data, new_size, resample=cv2.INTER_AREA\n )\n if _im is not None:\n processed.append(Img(_im))\n else:\n LOG.error('Expected image from resize_opencv(), got None')\n except ImageTooSmallError as e:\n LOG.warning(e)\n LOG.info(processed)\n out_image_size = processed[0].size\n im = processed[0].data\n elif cmd == \"text\":\n if im is None:\n LOG.error('Image is None')\n return\n im = watermark.with_text(\n im,\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n ) # type: ignore\n elif cmd == \"text2\":\n im = watermark.with_text(\n Image.fromarray(im),\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n )\n im = np.asarray(im)\n elif cmd == \"watermark\":\n im = watermark.with_image(\n im,\n Image.open(arg.image),\n scale=arg.scale,\n position=arg.position,\n padding=arg.margin,\n opacity=arg.opacity,\n invert=arg.invert,\n )\n elif cmd == \"watermark2\":\n watermark_image = cv2.imread(arg.image.name, cv2.IMREAD_UNCHANGED)\n # im = watermark.with_image_opencv(\n # im,\n # watermark_image,\n # scale=arg.scale,\n # position=arg.position,\n # opacity=arg.opacity,\n # padding=arg.margin,\n # )\n try:\n im = watermark.overlay_transparent(\n im,\n watermark_image,\n scale=arg.scale,\n padding=arg.margin,\n position=arg.position,\n alpha=arg.opacity,\n invert=arg.invert,\n )\n except OverlaySizeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n elif cmd == \"sharpen\":\n im = sharpen.unsharp_mask(im, amount=arg.amount, threshold=arg.threshold)\n elif cmd == \"save\":\n # if type(im) == np.ndarray:\n # im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n use_progressive_jpg = in_file_size > 10000\n if use_progressive_jpg:\n LOG.debug(\"Large file; using progressive jpg\")\n\n # Exif\n if arg.keep_exif:\n out_exif = piexif.dump(piexif.load(in_file_path))\n out_exif_size = sys.getsizeof(out_exif)\n\n outbuf = BytesIO()\n try:\n im.save(\n outbuf,\n \"JPEG\",\n quality=arg.jpg_quality,\n dpi=in_dpi,\n progressive=use_progressive_jpg,\n optimize=True,\n exif=out_exif,\n )\n except AttributeError:\n write_params = [\n cv2.IMWRITE_JPEG_QUALITY,\n arg.jpg_quality,\n cv2.IMWRITE_JPEG_OPTIMIZE,\n ]\n if use_progressive_jpg:\n write_params += [\n cv2.IMWRITE_JPEG_PROGRESSIVE,\n ]\n _, buf = cv2.imencode(\".jpg\", im, write_params)\n outbuf = BytesIO(buf)\n image_buffer = outbuf.getbuffer()\n out_file_size = image_buffer.nbytes + out_exif_size\n LOG.info(\"Buffer output size: %s\", humanize_bytes(out_file_size))\n\n if arg.output is None:\n root, _ = os.path.splitext(in_file_path)\n out_file_path = f\"{root}{arg.suffix}.jpg\"\n else:\n out_file_path = arg.output.name\n\n if arg.no_op:\n no_op = True\n continue\n LOG.info(\"Saving buffer to %s\", out_file_path)\n if (out_path := Path(out_file_path)).exists():\n if not arg.force:\n 
LOG.critical(\n \"file '%s' exists and force argument not found\", out_path\n )\n print(\n f\"{fg.red}{ef.bold}Error: file '{out_path}' exists;\",\n f\" use -f option to force overwrite.{rs.all}\",\n file=sys.stderr,\n )\n return\n # Create output dir if it doesn't exist\n out_path.parent.mkdir(parents=True, exist_ok=True)\n\n with out_path.open(\"wb\") as f:\n f.write(image_buffer)\n if arg.keep_exif:\n piexif.insert(out_exif, out_file_path)\n out_file_size = os.path.getsize(out_file_path)\n\n elapsed = perf_counter() - time_start\n report = generate_report(\n in_file_size,\n out_file_size,\n in_file_path,\n out_file_path,\n in_image_size,\n out_image_size,\n elapsed,\n no_op,\n )\n print(report)",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def smooth_stitch(*, input_dir, output_dir):\n image_paths = glob(os.path.join(input_dir, \"*.tif\"))\n if not image_paths:\n raise RuntimeError(\"%s does not contain any .tif file\" % (input_dir))\n\n # Get the profile and affine of some image as template for output image\n first_image = image_paths[0]\n with rasterio.open(first_image) as src:\n profile = src.profile.copy()\n src_res = src.res\n chip_size = src.width\n assert src.width == src.height\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmp_image_paths = generate_spline_window_chips(\n image_paths=image_paths, output_dir=tmpdir\n )\n\n # Get bounds from all images and build R-Tree index\n idx, (dst_w, dst_s, dst_e, dst_n) = build_bounds_index(tmp_image_paths)\n\n # Get affine transform for complete bounds\n logger.info(\"Output bounds: %r\", (dst_w, dst_s, dst_e, dst_n))\n output_transform = Affine.translation(dst_w, dst_n)\n logger.info(\"Output transform, before scaling: %r\", output_transform)\n\n output_transform *= Affine.scale(src_res[0], -src_res[1])\n logger.info(\"Output transform, after scaling: %r\", output_transform)\n\n # Compute output array shape. We guarantee it will cover the output\n # bounds completely. We need this to build windows list later.\n output_width = int(math.ceil((dst_e - dst_w) / src_res[0]))\n output_height = int(math.ceil((dst_n - dst_s) / src_res[1]))\n\n # Set width and height for output chips, and other attributes\n profile.update(width=chip_size, height=chip_size, tiled=True)\n\n windows = list(\n sliding_windows(chip_size, width=output_width, height=output_height)\n )\n logger.info(\"Num. windows: %d\", len(windows))\n\n for win, (i, j) in tqdm(windows):\n # Get window affine transform and bounds\n win_transform = rasterio.windows.transform(win, output_transform)\n win_bounds = rasterio.windows.bounds(win, output_transform)\n\n # Get chips that intersect with window\n intersect_chip_paths = [\n tmp_image_paths[i] for i in idx.intersection(win_bounds)\n ]\n\n if intersect_chip_paths:\n # Merge them with median method\n img = merge_chips(intersect_chip_paths, win_bounds=win_bounds)\n\n # Write output chip\n profile.update(transform=win_transform)\n output_path = os.path.join(output_dir, f\"{i}_{j}.tif\")\n\n os.makedirs(output_dir, exist_ok=True)\n with rasterio.open(output_path, \"w\", **profile) as dst:\n for i in range(img.shape[0]):\n dst.write(img[i, :, :], i + 1)",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--interval\", \"-t\", type=int,\n help=\"length of time in seconds between images\",\n default=60)\n parser.add_argument(\"--images\", \"-n\", type=int, help=\"number of images\", default=1)\n parser.add_argument(\"--output\", \"-o\",\n help=\"path to output directory\", default=\"./\")\n parser.add_argument(\"--rotation\", \"-r\",\n help=\"degrees to rotate pi image\", default=0)\n parser.add_argument(\"--iso\", \"-i\", type=int, help=\"camera iso 0 is auto mode\", default=0)\n parser.add_argument(\"--shutter\", \"-s\", type=int,\n help=\"camera shutter speed in microseconds 0 is auto mode\", default=0)\n parser.add_argument(\"--exposure\", \"-e\", help=\"exposure compensation set between -25 and +25\", default=0)\n args = parser.parse_args()\n interval = int(args.interval)\n images = int(args.images)\n output = args.output\n rotation = int(args.rotation)\n iso = int(args.iso)\n shutter_speed = int(args.shutter)\n exposure_compensation = int(args.exposure)\n\n\n timelapse(int=interval,\n n=images,\n dir=output,\n rotation=rotation,\n iso=iso,\n shutter=shutter_speed,\n exposure=exposure_compensation)",
"def _run_rbf_interpolation(out_dir, layer, bounds, function, smooth):\n # if running scipy methods prepend root dir to out path\n out_dir = OPJ(path_root, out_dir)\n if not os.path.isdir(out_dir):\n print(\n os.path.abspath(out_dir),\n ' does not exist, creating directory.\\n'\n )\n Path(out_dir).mkdir(parents=True, exist_ok=True)\n\n out_file = OPJ(\n out_dir, \n '{time_agg}.tiff'.format(time_agg=layer)\n )\n print(\n '\\nInterpolating {g} point bias ratios for: {t}\\n'.\\\n format(g=grid_var, t=layer),\n 'Using the \"{}\" method\\n'.format(function),\n 'Resolution (pixel size) of output raster: {} degrees'.format(res)\n )\n print( \n 'GeoTIFF raster will be saved to: \\n',\n os.path.abspath(out_file)\n )\n\n\n # get grid extent based on station locations in CSV\n if not bounds:\n bounds = get_subgrid_bounds(in_path, buffer=buffer, grid_res=CS) \n lon_min, lon_max, lat_min, lat_max = bounds\n # fix any minor adjustments to make raster fit gridMET fishnet extent\n # if scale_factor=1 make sure raster pixels align exactly w/gridcells\n # raster extent may exceed fishnet grid to fill gaps for zonal stats\n if scale_factor:\n nxcells = abs(lon_min-lon_max) / (CS*scale_factor)\n nycells = abs(lat_min-lat_max) / (CS*scale_factor)\n remainder_x = int(nxcells) - nxcells\n remainder_y = int(nycells) - nycells\n if abs(remainder_x) > CS:\n remainder_x -= CS * (remainder_x / CS) \n if abs(remainder_y) > CS:\n remainder_y -= CS * (remainder_y / CS)\n lon_min -= remainder_x\n lon_max += CS\n lat_min -= remainder_y\n lat_min -= CS\n \n # check if layer is in summary CSV \n existing_layers = pd.read_csv(in_path).columns\n if not layer in existing_layers:\n print('Column {} does not exist in input CSV:\\n {}'.format(\n layer, in_path),\n '\\nSkipping interpolation.'\n )\n return\n \n # get point station data from summary CSV\n in_df = pd.read_csv(in_path, na_values=[-999])\n lon_pts, lat_pts = in_df.STATION_LON.values, in_df.STATION_LAT.values\n values = in_df[layer].values\n \n # mask out stations with missing data\n if in_df[layer].isnull().sum() > 0:\n mask = in_df[layer].notnull()\n n_missing = in_df[layer].isna().sum()\n # if one point or less data points exists exit\n if len(mask) == n_missing or len(values) - n_missing == 1:\n print('Missing sufficient point data for variable: {} {}'.\\\n format(grid_var, layer),\n '\\nNeed at least two stations with data, skipping.')\n return\n print('Warning:\\n',\n 'Data missing for {} of {} stations for variable: {} {}'.\\\n format(n_missing, len(values), grid_var, layer),\n '\\nproceeding with interpolation.')\n # get locations where ratio is not nan\n values = values[mask]\n lon_pts = lon_pts[mask]\n lat_pts = lat_pts[mask]\n\n nx_cells = int(np.round(np.abs((lon_min - lon_max) / CS)))\n ny_cells = int(np.round(np.abs((lat_min - lat_max) / CS)))\n # rbf requires uniform grid (n X n) so \n # extend short dimension and clip later \n nx_cells_out = copy.copy(nx_cells)\n ny_cells_out = copy.copy(ny_cells)\n # gdal requires \"upper left\" corner coordinates\n lat_max_out = copy.copy(lat_max)\n lon_max_out = copy.copy(lon_max)\n # extend short dimension to make square grid\n if not nx_cells == ny_cells:\n diff = np.abs(nx_cells - ny_cells)\n if nx_cells > ny_cells:\n lat_max += diff * CS\n ny_cells += diff\n else:\n lon_max += diff * CS\n nx_cells += diff\n\n if scale_factor == 1:\n # make finer/coarse grid by scale factor\n lons = np.linspace(lon_min, lon_max, \n int(np.round(nx_cells/scale_factor))-1)\n lats = np.linspace(lat_min, lat_max, \n 
int(np.round(ny_cells/scale_factor))-1)\n # extent for original created by spatial.build_subgrid\n # add one to make sure raster covers full extent\n lons_out = np.linspace(lon_min, lon_max_out, \n int(np.round(nx_cells_out/scale_factor))-1)\n lats_out = np.linspace(lat_min, lat_max_out, \n int(np.round(ny_cells_out/scale_factor))-1)\n\n else:\n # add one extra cell to cover grid buffer extent for upscaling\n # raster extent always >= grid buffer\n lons = np.linspace(lon_min, lon_max, \n int(np.round(nx_cells/scale_factor)))\n lats = np.linspace(lat_min, lat_max, \n int(np.round(ny_cells/scale_factor)))\n lons_out = np.linspace(lon_min, lon_max_out, \n int(np.round(nx_cells_out/scale_factor)))\n lats_out = np.linspace(lat_min, lat_max_out, \n int(np.round(ny_cells_out/scale_factor)))\n\n # if function was 'linear_rbf' \n function = function.replace('_rbf', '')\n # make sampling square grid\n XI, YI = np.meshgrid(lons, lats)\n # apply rbf interpolation\n rbf = Rbf(lon_pts, lat_pts, values, function=function, smooth=smooth)\n ZI = rbf(XI, YI)\n # clip to original extent, rbf array flips axes, and row order... \n ZI_out = ZI[0:len(lats_out),0:len(lons_out)]\n ZI_out = np.flip(ZI_out,axis=0)\n\n #### save scipy interpolated data as raster \n pixel_size = CS * scale_factor\n # number of pixels in each direction\n x_size = len(lons_out)\n y_size = len(lats_out)\n # set geotransform info\n gt = [\n lon_min,\n pixel_size,\n 0,\n lat_max_out,\n 0,\n -pixel_size\n ]\n # make geotiff raster\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(\n out_file,\n x_size, \n y_size, \n 1, \n gdal.GDT_Float32, \n )\n # set projection geographic lat/lon WGS 84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n # assign spatial dimensions \n ds.SetGeoTransform(gt)\n outband = ds.GetRasterBand(1)\n # save rbf interpolated array as geotiff raster close\n outband.WriteArray(ZI_out)\n ds = None\n\n # calc residuals add to shapefile and in_path CSV, move shape to out_dir\n # only residuals for bias ratios, i.e. not for std dev, etc\n if layer in InterpGdal.default_layers:\n calc_pt_error(in_path, out_dir, layer, grid_var, \n grid_id_name=grid_id_name\n )\n # calculate zonal statistics save means for each gridMET cell\n if z_stats:\n zonal_stats(in_path, out_file, grid_id_name=grid_id_name)\n \n # plot layer's interpolated residuals as bar plot witheach Wx station \n # only produce residual plots for bias ratios, i.e. not for std dev, etc\n if res_plot and layer in InterpGdal.default_layers:\n layer = InterpGdal.var_residual_names.get(\n layer, \n layer.replace('mean','res')\n )\n y_label = 'residual (interpolated minus station value)'\n title = 'layer: {} algorithm: {} (RBF) resolution: {} deg.'.format(\n layer, function ,res)\n res_plot_dir = Path(out_dir)/'residual_plots'\n subtitle='parameters: smooth={}'.format(smooth)\n source_file = Path(out_dir)/Path(in_path).name\n\n station_bar_plot(source_file, layer, out_dir=res_plot_dir, \n y_label=y_label, title=title, subtitle=subtitle)",
"def main():\n batch_size = 64\n nb_runs = 20\n\n # Measure time required to generate 100k augmentation matrices\n \"\"\"\n print(\"Generating 100 times 1000 augmentation matrices of size 64x64...\")\n start = time.time()\n for _ in range(100):\n create_aug_matrices(1000, 64, 64,\n scale_to_percent=1.5, scale_axis_equally=False,\n rotation_deg=20, shear_deg=20,\n translation_x_px=5, translation_y_px=5)\n print(\"Done in %.8f\" % (time.time() - start,))\n \"\"\"\n\n # Test Performance on 64 images of size 512x512 pixels\n image = data.lena()\n images = np.resize(image, (batch_size, image.shape[0], image.shape[1], image.shape[2]))\n augmenter = ImageAugmenter(image.shape[0], image.shape[1],\n hflip=True, vflip=True,\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n run_tests(augmenter, images, nb_runs)\n print(\"\")\n\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n print(\"(With 1000 pregenerated matrices)\")\n augmenter.pregenerate_matrices(1000)\n run_tests(augmenter, images, nb_runs)\n print(\"\")\n\n # Test Performance on 64 images of size 64x64 pixels\n image = data.lena()\n image = misc.imresize(image, (64, 64))\n images = np.resize(image, (batch_size, image.shape[0], image.shape[1], image.shape[2]))\n augmenter = ImageAugmenter(image.shape[0], image.shape[1],\n hflip=True, vflip=True,\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n run_tests(augmenter, images, nb_runs)\n\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n print(\"(With 1000 pregenerated matrices)\")\n augmenter.pregenerate_matrices(1000)\n run_tests(augmenter, images, nb_runs)\n print(\"\")\n\n # Time required to augment 1,000,000 images of size 32x32\n print(\"Augmenting 1000 batches of 1000 lena images (1 million total)\" \\\n \", each of size 32x32...\")\n image = data.lena()\n image = misc.imresize(image, (32, 32))\n batch_size = 1000\n images = np.resize(image, (batch_size, image.shape[0], image.shape[1], image.shape[2]))\n augmenter = ImageAugmenter(image.shape[1], image.shape[0],\n hflip=True, vflip=True,\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n augmenter.pregenerate_matrices(1000)\n\n start = time.time()\n for _ in range(1000):\n augmenter.augment_batch(images)\n print(\"Done in %.8fs\" % (time.time() - start,))\n print(\"\")\n\n # Time required to augment 1,000,000 images of size 32x32\n # but using only one matrix without the class (no library overhead from\n # ImageAugmenter)\n # Notice that this does not include horizontal and vertical flipping,\n # which is done via numpy in the ImageAugmenter class.\n print(\"Augmenting 1000 batches of 1000 lena images (1 million total)\" \\\n \", each of size 32x32, using one matrix directly (no ImageAugmenter \" \\\n \"class)...\")\n matrices = create_aug_matrices(1, image.shape[1], image.shape[0],\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n matrix = matrices[0]\n\n start = time.time()\n for _ in range(1000):\n for image in images:\n augmented_image = tf.warp(image, matrix)\n print(\"Done in %.8fs\" % 
(time.time() - start,))",
"def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )",
"def manipulations(path):\r\n\r\n print (\"\\n Working on %s\\n\" %(path))\r\n\r\n # Creates a folder with the results for the current image\r\n if not os.path.exists(\"Results\\\\%s\" %(path)):\r\n os.makedirs(\"Results\\\\%s\" %(path))\r\n\r\n # The variations made of the image\r\n func.pixelImage(path, 10, 10)\r\n func.animate(path)\r\n func.colorScale(path, 0)\r\n func.colorScale(path, 1)\r\n func.colorScale(path, 2)\r\n func.scan(path, 280)\r\n func.greyImage(path)\r\n func.colorSteps(path, 1)\r\n func.inverted(path)",
"def test_interp(self):\n (_, gen_val, gen_test) = self.dataset.data_loaders(\n batch_size=1, # TODO: remove this hard coding\n split=(0.01, 0.5)\n )\n gen_it_test = gen_test.__iter__()\n for _ in range(randint(0, len(gen_test))):\n tensor_score1, _ = next(gen_it_test)\n\n gen_it_val = gen_val.__iter__()\n for _ in range(randint(0, len(gen_val))):\n tensor_score2, _ = next(gen_it_val)\n\n tensor_score1 = to_cuda_variable(tensor_score1.long())\n tensor_score2 = to_cuda_variable(tensor_score2.long())\n self.test_interpolation(tensor_score1, tensor_score2, 10)",
"def bruteforce(self):\n import time\n t1 = time.time()\n for i in range(self.td.shape[0]):\n #Get the latitude at the start of the row, this is used for the entire row\n\n if i % config.LATITUDE_STEP == 0:\n startlat = i + config.LATITUDE_STEP #move to the center of the step\n startlat += self.start #Offset for parallel segmentation\n\n # This is the latitude at the center of the tile defined by\n # the image width, and the latitude_step\n x = int(self.td.shape[1] / 2)\n y = int((startlat + config.LATITUDE_STEP) / 2)\n latitude, _ = self.temperature.pixel_to_latlon(x,y)\n\n lat_f = PchipInterpolator(self.latitudenodes, self.lookup, extrapolate=False, axis=0)\n #The reshape corresponds to the dimensions of the OLAP cube\n # 5 elevations, 5 slope azimuths, 3 slopes, 3 opacities, 3 albedos, and finally 20 TI\n data = lat_f(latitude)\n compressedlookup = data.reshape(6,5,3,3,3,20)\n # Compute the PChip interpolation function for elevation\n elevation_interp_f = PchipInterpolator(np.array([-5.0, -2.0, -1.0, 1.0, 6.0, 8.0]), compressedlookup, extrapolate=False, axis=0)\n \n for j in range(self.td.shape[1]):\n # Each interpolation is composed in 2 parts.\n # 1. The interpolation function is computed.\n # 2. The interpolation function is applied.\n #print(self.reference[i,j], self.r_ndv)\n # If either the reference or the input THEMIS have no data\n if (self.td[i,j] == self.ndv) or (self.reference[i,j] == self.r_ndv):\n #The pixel is no data in the input, propagate to the output\n self.resultdata[i,j] = self.ndv\n continue\n\n #Interpolate elevation\n try:\n new_elevation = elevation_interp_f(self.ed[i,j])\n except:\n # The elevation is bad.\n self.resultdata[i,j] = self.ndv\n self.log[i,j] = self.error_codes['elevation_out_of_bounds']\n continue\n #Interpolate Slope Azimuth\n slopeaz_f = self.compute_interpolation_function(sorted(self.slopeaz_lookup.keys()),\n new_elevation,\n config.SLOPEAZ_INTERPOLATION)\n new_slopeaz = slopeaz_f(self.sz[i,j])\n #Interpolate Slope\n slope_f = self.compute_interpolation_function(sorted(self.slope_lookup.keys()),\n new_slopeaz,\n config.SLOPE_INTERPOLATION)\n capped_slope = self.sd[i,j]\n if capped_slope > 60.0:\n capped_slope = 60.0\n new_slope = slope_f(capped_slope)\n # I am having problems here with pulling TAU properly - check montabone!\n #Interpolate Tau\n tau_f = PchipInterpolator(sorted(self.tau_lookup.keys()),\n new_slope,\n extrapolate=False,\n axis=0)\n new_tau = tau_f(self.od[i,j])\n #Interpolate Albedo\n albedo_f = self.compute_interpolation_function(sorted(self.albedo_lookup.keys()),\n new_tau,\n config.ALBEDO_INTERPOLATION)\n new_albedo = albedo_f(self.ad[i,j])\n #Interpolate Inertia\n self.resultdata[i,j] = self.extract_monotonic(self.td[i,j],\n new_albedo)",
"def compute(self, Rs, D):\n self.Rs = Rs\n self.D = D\n self.M = (self.Rs * c**2 * au) / (2 * G * M_sun)\n print(\"M = %.1e M☉\\t%.2e Kg\" % (self.M, self.M*M_sun))\n print(\"Rs = %s ua\\t%.2e m\" % (self.Rs, self.Rs*au))\n print(\"D = %s ua\\t%.2e m\\n\" % (self.D, self.D*au))\n\n vrai_debut = time.process_time()\n\n\n seen_angle, deviated_angle = self.trajectories()\n\n self.interpolation = self.interpolate(seen_angle, deviated_angle)\n\n if self.display_interpolation is True:\n xmin = np.min(seen_angle)\n xmax = np.max(seen_angle)\n seen_angle_splin = np.linspace(xmin, xmax, 20001)\n deviated_angle_splin = self.interpolation(seen_angle_splin)\n plt.figure('Trajectories interpolation')\n plt.clf()\n plt.title(\"Light deviation interpolation\", va='bottom')\n plt.xlabel('seen angle(°)')\n plt.ylabel('deviated angle(°)')\n plt.plot(seen_angle, deviated_angle, 'o')\n plt.plot(seen_angle_splin, deviated_angle_splin)\n plt.grid()\n #plt.savefig('interpolation.png', dpi=250, bbox_inches='tight')\n plt.draw()\n#\n print(\"last angle\", seen_angle[-1])\n print(\"trajectories time: %.1f\" % (time.process_time()-vrai_debut))\n\n img_matrix_x, img_matrix_y = self.create_matrices()\n\n self.img_matrix_x = img_matrix_x\n self.img_matrix_y = img_matrix_y\n\n self.img2 = self.img_pixels(self.img_debut)\n\n vrai_fin = time.process_time()\n print(\"\\nglobal computing time: %.1f\\n\" % (vrai_fin-vrai_debut))",
"def run_tests(augmenter, images, nb_runs):\n results = np.zeros((nb_runs,))\n for i in range(nb_runs):\n start = time.time()\n augmenter.augment_batch(images)\n results[i] = time.time() - start\n print(\"Run %d: %.8fs\" % (i, results[i]))\n print(\"Mean: %.8fs\" % (results.mean(),))\n print(\"Sum: %.8fs\" % (results.sum(),))",
"def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map",
"def interpolate_dataset(args, adacof_model, fusion_net, dataset_path='', max_num=None):\n\n if args.vimeo_testset:\n interpolate_vimeo_testset(args, adacof_model, fusion_net)\n else:\n dataset_name = os.path.basename(dataset_path)\n print('Interpolating Dataset {}'.format(dataset_name))\n dataset = sorted(glob.glob('{}/*.png'.format(dataset_path)))\n if not dataset:\n dataset = sorted(glob.glob('{}/*.jpg'.format(dataset_path)))\n\n num = len(dataset)-2\n start = 0\n end = num\n if max_num and max_num < num:\n start = random.randint(0, num - max_num)\n end = start + max_num\n\n it = range(start, end)\n print(dataset_path)\n\n print('Start: {}'.format(start))\n print('End: {}'.format(end))\n\n for i in tqdm(iterable=it, total=len(it)):\n interpolated_filename = '{}.png'.format(str(i+1).zfill(4))\n output_path_adacof = os.path.join(\n args.base_dir, args.img_output, dataset_name, 'adacof')\n output_path_phasenet = os.path.join(\n args.base_dir, args.img_output, dataset_name, 'phasenet')\n output_path_fusion = os.path.join(\n args.base_dir, args.img_output, dataset_name, 'fusion')\n output_path_baseline = os.path.join(\n args.base_dir, args.img_output, dataset_name, 'baseline')\n\n output_path_adacof_image = os.path.join(\n output_path_adacof, interpolated_filename)\n output_path_phasenet_image = os.path.join(\n output_path_phasenet, interpolated_filename)\n output_path_fusion_image = os.path.join(\n output_path_fusion, interpolated_filename)\n output_path_baseline_image = os.path.join(\n output_path_baseline, interpolated_filename)\n\n # Interpolate and create output folders if they don't exist yet\n '''if args.adacof:\n os.makedirs(output_path_adacof, exist_ok=True)\n interpolate_adacof(\n args, dataset[i], dataset[i+2], output_path_adacof_image)\n if args.phase:\n os.makedirs(output_path_phasenet, exist_ok=True)\n interpolate_phasenet(\n args, dataset[i], dataset[i+2], output_path_phasenet_image)'''\n if args.fusion:\n os.makedirs(output_path_fusion, exist_ok=True)\n os.makedirs(output_path_adacof, exist_ok=True)\n os.makedirs(output_path_phasenet, exist_ok=True)\n os.makedirs(output_path_baseline, exist_ok=True)\n interpolate_fusion(args, adacof_model, fusion_net,\n dataset[i], dataset[i +\n 2], output_path_fusion_image,\n output_path_phasenet_image, output_path_adacof_image, output_path_baseline_image)",
"def interpolate_vimeo_testset(args, adacof_model, fusion_net):\n # Read file with triplets\n with open(os.path.join('Testset', 'vimeo_interp_test', 'tri_testlist.txt')) as f:\n triplets = f.readlines()\n triplets = [x.strip() for x in triplets]\n\n for triplet in tqdm(triplets):\n im1 = os.path.join('Testset', 'vimeo_interp_test',\n 'input', triplet, 'im1.png')\n im3 = os.path.join('Testset', 'vimeo_interp_test',\n 'input', triplet, 'im3.png')\n\n output_path_adacof = os.path.join(\n args.base_dir, args.img_output, 'adacof', triplet)\n output_path_phasenet = os.path.join(\n args.base_dir, args.img_output, 'phasenet', triplet)\n output_path_fusion = os.path.join(\n args.base_dir, args.img_output, 'fusion', triplet)\n output_path_baseline = os.path.join(\n args.base_dir, args.img_output, 'baseline', triplet)\n\n output_path_adacof_image = os.path.join(output_path_adacof, 'im2.png')\n output_path_phasenet_image = os.path.join(\n output_path_phasenet, 'im2.png')\n output_path_fusion_image = os.path.join(output_path_fusion, 'im2.png')\n output_path_baseline_image = os.path.join(\n output_path_baseline, 'im2.png')\n\n # Interpolate and create output folders if they don't exist yet\n '''if args.adacof:\n os.makedirs(output_path_adacof, exist_ok=True)\n interpolate_adacof(\n args, im1, im3, output_path_adacof_image)\n if args.phase:\n os.makedirs(output_path_phasenet, exist_ok=True)\n interpolate_phasenet(\n args, im1, im3, output_path_phasenet_image)'''\n if args.fusion:\n os.makedirs(output_path_fusion, exist_ok=True)\n os.makedirs(output_path_phasenet, exist_ok=True)\n os.makedirs(output_path_adacof, exist_ok=True)\n os.makedirs(output_path_baseline, exist_ok=True)\n\n interpolate_fusion(args, adacof_model, fusion_net,\n im1, im3, output_path_fusion_image, output_path_phasenet_image,\n output_path_adacof_image, output_path_baseline_image)",
"def registration(im1, im2, num = 10, opt = 'py', outputPath = 'None'):\n\n # determin which one is the right side of the breast\n b_size = 5\n n_row, n_col = im1.shape\n side = 0\n if np.sum(im1[0:b_size,0:b_size]) < np.sum(im1[0:b_size,n_col-b_size:n_col]):\n side = 1 \n\n # flip the right side image\n if side == 1:\n im1 = np.fliplr(im1)\n else:\n im2 = np.fliplr(im2) \n\n # find edges of both images\n edge1 = findEdge(im1)\n edge2 = findEdge(im2)\n\n # tune edges of both side\n edge1 = tuneEdge(edge1,im1.shape)\n edge2 = tuneEdge(edge2,im2.shape)\n\n # samping from both side\n points1 = contour_sampling(edge1, num)\n points2 = contour_sampling(edge2, num)\n\n # for debugging .........................\n sam_im1 = np.zeros(im1.shape,np.float32)\n for point in points1:\n sam_im1[point[0],point[1]] = 1\n\n sam_im2 = np.zeros(im2.shape,np.float32)\n for point in points2:\n sam_im2[point[0],point[1]] = 1\n \n selem = disk(15)\n dilated1 = ndimage.convolve(sam_im1, selem, mode='constant', cval=0)\n dilated2 = ndimage.convolve(sam_im2, selem, mode='constant', cval=0)\n\n points1 = np.asarray(points1)\n points2 = np.asarray(points2)\n \n # Thin Plate Spline interpolation\n dst = np.zeros(im1.shape)\n # im1 as source\n if opt == 'py': \n tps = TPSpline.TPSpline()\n tps.setCorrespondences(points1, points2)\n dst = tps.warpImage(im1)\n return dst\n\n if opt == 'c':\n print \"Please run the interpolation with C++ exe file!\"\n print \"./TPSpline /home/yanbin/Tomosynthesis/libs/TPSpline/test/ps.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/pd.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/5016_test.tif /home/yanbin/Tomosynthesis/libs/TPSpline/test/dst.tif\"\n np.savetxt(outputPath + 'ps.txt', points1, '%d', delimiter=' ') # X is an array\n np.savetxt(outputPath + 'pd.txt', points2, '%d', delimiter=' ') # X is an array\n tiffLib.imsave(outputPath + 'im1.tif',im1)\n return None",
"def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")"
] | [
"0.6005782",
"0.5990915",
"0.58582526",
"0.57174736",
"0.5715835",
"0.57075953",
"0.5687177",
"0.5644446",
"0.55878294",
"0.55830985",
"0.55769694",
"0.55724096",
"0.5563402",
"0.55316186",
"0.55314726",
"0.5511741",
"0.5504877",
"0.54847926",
"0.54411846",
"0.5347744",
"0.5331806",
"0.5307531",
"0.53033143",
"0.529733",
"0.52812195",
"0.5263208",
"0.5253313",
"0.52437216",
"0.5222725",
"0.52089065"
] | 0.6844896 | 0 |
Benchmark cpu vs gpu time wise. | def benchmark_cpu_vs_gpu(input_raw_file):
nb_iterations = 20
(cpu1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', nb_iterations, 'nn', 4000, 2000)
(gpu1, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', nb_iterations, 'nn', 4000, 2000)
(cpu2, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', nb_iterations, 'bl', 4000, 2000)
(gpu2, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', nb_iterations, 'bl', 4000, 2000)
# return ((cpu1/nb_iterations, cpu2/nb_iterations), (gpu1/nb_iterations, gpu2/nb_iterations))
return ((cpu1, cpu2), (gpu1, gpu2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def speed():\r\n\r\n algo = ['logistic_sgd', 'logistic_cg', 'mlp', 'convolutional_mlp',\r\n 'dA', 'SdA', 'DBN', 'rbm', 'rnnrbm']\r\n to_exec = [True] * len(algo)\r\n# to_exec = [False] * len(algo)\r\n# to_exec[-1] = True\r\n do_float64 = True\r\n do_float32 = True\r\n do_gpu = True\r\n\r\n algo_executed = [s for idx, s in enumerate(algo) if to_exec[idx]]\r\n #Timming expected are from the buildbot that have an i7-920 @\r\n # 2.67GHz with hyperthread enabled for the cpu, 12G of ram. An GeForce GTX\r\n # 285 for the GPU. OS=Fedora 14, gcc=4.5.1, python/BLAS from EPD\r\n # 7.1-2 (python 2.7.2, mkl unknow). BLAS with only 1 thread.\r\n\r\n expected_times_64 = numpy.asarray([10.0, 22.5, 76.1, 73.7, 116.4,\r\n 346.9, 381.9, 558.1, 186.3])\r\n expected_times_32 = numpy.asarray([11.6, 29.6, 42.5, 66.5, 71,\r\n 191.2, 226.8, 432.8, 176.2])\r\n\r\n # Number with just 1 decimal are new value that are faster with\r\n # the Theano version 0.5rc2 Other number are older. They are not\r\n # updated, as we where faster in the past!\r\n # TODO: find why and fix this!\r\n\r\n# Here is the value for the buildbot on February 3th 2012.\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n# gpu times[3.72957802, 9.94316864, 29.1772666, 9.13857198, 25.91144657,\r\n# 18.30802011, 53.38651466, 285.41386175]\r\n# expected [3.076634879, 7.555234910, 18.99226785, 9.58915591, 24.130070450,\r\n# 24.77524018, 92.66246653, 322.340329170]\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n#expected/get [0.82492841, 0.75984178, 0.65092691, 1.04930573, 0.93125138\r\n# 1.35324519 1.7356905 1.12937868]\r\n expected_times_gpu = numpy.asarray([3.07663488, 7.55523491, 18.99226785,\r\n 9.6, 24.13007045,\r\n 20.4, 56, 302.6, 315.4])\r\n expected_times_64 = [s for idx, s in enumerate(expected_times_64)\r\n if to_exec[idx]]\r\n expected_times_32 = [s for idx, s in enumerate(expected_times_32)\r\n if to_exec[idx]]\r\n expected_times_gpu = [s for idx, s in enumerate(expected_times_gpu)\r\n if to_exec[idx]]\r\n\r\n def time_test(m, l, idx, f, **kwargs):\r\n if not to_exec[idx]:\r\n return\r\n print algo[idx]\r\n ts = m.call_time\r\n try:\r\n f(**kwargs)\r\n except Exception, e:\r\n print >> sys.stderr, 'test', algo[idx], 'FAILED', e\r\n l.append(numpy.nan)\r\n return\r\n te = m.call_time\r\n l.append(te - ts)\r\n\r\n def do_tests():\r\n m = theano.compile.mode.get_default_mode()\r\n l = []\r\n time_test(m, l, 0, logistic_sgd.sgd_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 1, logistic_cg.cg_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 2, mlp.test_mlp, n_epochs=5)\r\n time_test(m, l, 3, convolutional_mlp.evaluate_lenet5, n_epochs=5,\r\n nkerns=[5, 5])\r\n time_test(m, l, 4, dA.test_dA, training_epochs=2,\r\n output_folder='tmp_dA_plots')\r\n time_test(m, l, 5, SdA.test_SdA, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 6, DBN.test_DBN, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 7, rbm.test_rbm, training_epochs=1, batch_size=300,\r\n n_chains=1, n_samples=1, output_folder='tmp_rbm_plots')\r\n time_test(m, l, 8, rnnrbm.test_rnnrbm, num_epochs=1)\r\n return numpy.asarray(l)\r\n\r\n #test in float64 in FAST_RUN mode on the cpu\r\n import theano\r\n if do_float64:\r\n theano.config.floatX = 'float64'\r\n theano.config.mode = 'FAST_RUN'\r\n float64_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % 
expected/get', (\r\n expected_times_64 / float64_times)\r\n\r\n #test in float32 in FAST_RUN mode on the cpu\r\n theano.config.floatX = 'float32'\r\n if do_float32:\r\n float32_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n\r\n #test in float32 in FAST_RUN mode on the gpu\r\n import theano.sandbox.cuda\r\n if do_gpu:\r\n theano.sandbox.cuda.use('gpu')\r\n gpu_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n\r\n if (do_float64 + do_float32 + do_gpu) > 1:\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n if do_float64:\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n if do_float32:\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n if do_gpu:\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64 and do_float32:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n if do_float64 and do_gpu:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n print >> sys.stderr, 'expected float64/gpu', (\r\n expected_times_64 / gpu_times)\r\n if do_float32 and do_gpu:\r\n print >> sys.stderr, 'float32/gpu', float32_times / gpu_times\r\n print >> sys.stderr, 'expected float32/gpu', (\r\n expected_times_32 / gpu_times)\r\n\r\n def compare(x, y):\r\n ratio = x / y\r\n # If there is more then 5% difference between the expected\r\n # time and the real time, we consider this an error.\r\n return sum((ratio < 0.95) + (ratio > 1.05))\r\n\r\n if do_float64:\r\n err = compare(expected_times_64, float64_times)\r\n print >> sys.stderr, 'speed_failure_float64=' + 
str(err)\r\n if do_float32:\r\n err = compare(expected_times_32, float32_times)\r\n print >> sys.stderr, 'speed_failure_float32=' + str(err)\r\n if do_gpu:\r\n err = compare(expected_times_gpu, gpu_times)\r\n print >> sys.stderr, 'speed_failure_gpu=' + str(err)\r\n\r\n assert not numpy.isnan(gpu_times).any()",
"def test_compare(self):\n config = {\n 'num_components': 512,\n 'num_features': 128,\n 'covariance': 'spherical'\n }\n\n samples = self.generate_samples(config, 100_000)\n sklearn_time = np.mean([self.train_sklearn(config, samples) for _ in range(3)])\n ours_cpu_time = np.mean([self.train_ours(config, samples) for _ in range(3)])\n ours_gpu_time = np.mean([\n self.train_ours(config, samples.cuda(), gpu=True) for _ in range(3)\n ])\n\n print(f\"-------------------------------------\")\n print(f\"Speedup of CPU implementation: {sklearn_time / ours_cpu_time:.2f}\")\n print(f\"Speedup of GPU implementation: {sklearn_time / ours_gpu_time:.2f}\")\n print(f\"-------------------------------------\")",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)",
"def bench():\n times = []\n blocks = np.round(np.logspace(3, 7, num=50))\n for b in blocks:\n times.append(timeit.timeit('cpu_vs_mem.inplace(block_size=%d)' % b,\n 'import cpu_vs_mem', number=1))\n print('Block size: %d Execution time: %.3f s' % (b, times[-1]))\n sys.stdout.flush()\n\n return blocks, times",
"def cpu_time(self):",
"def test_cpu_statistics(self):\n import multiprocessing, time\n from supvisors.statistics import instant_cpu_statistics, cpu_statistics\n # take 2 spaced instant cpu statistics\n ref_stats = instant_cpu_statistics()\n time.sleep(1)\n last_stats = instant_cpu_statistics()\n stats = cpu_statistics(last_stats, ref_stats)\n # test number of results (number of cores + average)\n self.assertEqual(multiprocessing.cpu_count() + 1, len(stats))\n # test bounds (percent)\n for cpu in stats:\n self.assertIs(float, type(cpu))\n self.assertGreaterEqual(cpu, 0)\n self.assertLessEqual(cpu, 100)",
"def test_instant_cpu_statistics(self):\n import multiprocessing\n from supvisors.statistics import instant_cpu_statistics\n stats = instant_cpu_statistics()\n # test number of results (number of cores + average)\n self.assertEqual(multiprocessing.cpu_count() + 1, len(stats))\n # test average value\n total_work = total_idle = 0\n for cpu in stats[1:]:\n self.assertEqual(2, len(cpu))\n work, idle = cpu\n total_work += work\n total_idle += idle\n self.assertAlmostEqual(stats[0][0], total_work / multiprocessing.cpu_count())\n self.assertAlmostEqual(stats[0][1], total_idle / multiprocessing.cpu_count())",
"def benchmark(nx, tstop):\n Lx = 10\n Ly = 10\n c = 1.0\n ny = nx\n\n # our use of weave requires string formulas:\n Is = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x','y'),\n Lx=Lx, Ly=Ly, globals=globals())\n fs = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n BCs = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n\n def action(u, xv, yv, t):\n #print t\n pass\n\n implementation = {}\n cpu = []\n for ic in 'f77', 'vec', 'scalar', 'weave':\n for bc in 'f77', 'vec', 'scalar', 'weave':\n for inner in 'f77', 'vec', 'scalar', 'weave':\n implementation['ic'] = ic\n implementation['inner'] = inner\n implementation['bc'] = bc\n # optimize StringFunction functions for the non-weave case:\n # implementation:\n if 'weave' in (ic, bc, inner) or 'f77' in (ic, bc, inner):\n I = Is; f = fs; BC = BCs\n else:\n I = Is.__call__; f = fs.__call__; BC = BCs.__call__\n\n t0 = time.clock()\n dt, cpu_ic, cpu_inner, cpu_bc = \\\n solver(I, f, c, BC, Lx, Ly, nx, ny, 0, tstop,\n user_action=None,\n implementation=implementation,\n verbose=False)\n t1 = time.clock()\n cpu_total = cpu_ic + cpu_inner + cpu_bc\n overhead = (t1-t0)-cpu_total\n cpu.append([implementation.copy(), cpu_total,\n cpu_ic, cpu_inner, cpu_bc, overhead])\n print t1-t0, implementation, 'overhead:', overhead\n # normalize CPU-times:\n cpu_min = min([abs(c) for i, c, c1, c2, c3, c4 in cpu])\n print '\\n\\nMinimum CPU time:', cpu_min\n print 'no of time steps:', int(tstop/dt)\n print 'interior/boundary ratio:', int(nx*ny*1.0/max(nx,ny))\n for impl, cpu, cpu_ic, cpu_inner, cpu_bc, overhead in cpu:\n # normalized-CPU ic inner bc overhead\n print \"%8.2f\" % (cpu/cpu_min),\n print \"%-10s %8.2f; \" % (impl['ic'], cpu_ic),\n print \"%-10s %8.2f; \" % (impl['inner'], cpu_inner),\n print \"%-10s %8.2f; \" % (impl['bc'], cpu_bc),\n print \"%d%%\" % (overhead/cpu*100)",
"def benchmark_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)",
"def test_cpu_total_work(self):\n import time\n from supvisors.statistics import instant_cpu_statistics, cpu_total_work\n # take 2 spaced instant cpu statistics\n ref_stats = instant_cpu_statistics()\n time.sleep(1)\n last_stats = instant_cpu_statistics()\n total_work = cpu_total_work(last_stats, ref_stats)\n # total work should be quite close to sleeping time\n self.assertAlmostEqual(1, total_work, 1)",
"def measure_mp_speedup():\n modes = [\n # name, function\n ('dSMC', ana.d_smc),\n ('dAMC', ana.d_amc),\n ('EDF-VD', ana.d_edf_vd),\n ('pSMC', ana.p_smc),\n ('pAMC-BB', ana.p_amc_bb),\n ('pAMC-BB+', ft.partial(ana.p_amc_bb, ignore_hi_mode=True))\n ]\n times_seq = {}\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n start_total_seq = time()\n for name, func in modes:\n start_mode_seq = time()\n rates = []\n for task_sets in task_sets_list:\n results = []\n for task_set in task_sets:\n results.append(func(task_set))\n rates.append(100 * np.average(results))\n stop_mode_seq = time()\n times_seq[name] = stop_mode_seq - start_mode_seq\n stop_total_seq = time()\n times_seq['Overall'] = stop_total_seq - start_total_seq\n\n times_par = {}\n start_total_par = time()\n pool = mp.Pool()\n for name, func in modes:\n start_mode_par = time()\n rates = []\n for task_sets in task_sets_list:\n rates.append(100 * np.average(pool.map(func, task_sets)))\n stop_mode_par = time()\n times_par[name] = stop_mode_par - start_mode_par\n stop_total_par = time()\n times_par['Overall'] = stop_total_par - start_total_par\n\n speedups = {}\n for name, _ in modes:\n speedups[name] = times_seq[name] / times_par[name]\n speedups['Overall'] = times_seq['Overall'] / times_par['Overall']\n\n print(\"PERFORMANCE MEASUREMENTS\")\n print(\"Number of cores: %d\" % mp.cpu_count())\n print(\"Scheme: Sequential time / Parallel time / Speedup\")\n for name, _ in modes:\n print(\"%s: %.3fs / %.3fs / %.3f\" % (name, times_seq[name], times_par[name], speedups[name]))\n print(\"Overall: %.3fs / %.3fs / %.3f\" % (times_seq['Overall'], times_par['Overall'], speedups['Overall']))",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)",
"def benchmark_fp16_synth_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(num_gpus=1)\n self._run_benchmark(params)",
"def execute(self):\n print_verbose_messages = (self.verbose\n and self.device.communicator.rank == 0)\n\n # Ensure that all ops are attached (needed for is_tuning_complete).\n self.run(0)\n\n if print_verbose_messages:\n print(f'Running {type(self).__name__} benchmark')\n\n if print_verbose_messages:\n print(f'.. warming up for {self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if (isinstance(self.device, hoomd.device.GPU)\n and hasattr(self.sim.operations, 'is_tuning_complete')):\n while not self.sim.operations.is_tuning_complete:\n if print_verbose_messages:\n print('.. autotuning GPU kernel parameters for '\n f'{self.warmup_steps} steps')\n self.run(self.warmup_steps)\n\n if print_verbose_messages:\n print(f'.. running for {self.benchmark_steps} steps '\n f'{self.repeat} time(s)')\n\n # benchmark\n performance = []\n\n if isinstance(self.device, hoomd.device.GPU):\n with self.device.enable_profiling():\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n else:\n for i in range(self.repeat):\n self.run(self.benchmark_steps)\n performance.append(self.get_performance())\n if print_verbose_messages:\n print(f'.. {performance[-1]} {self.units}')\n\n return performance",
"def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)",
"def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)",
"def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)",
"def benchmark_xla_real_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.data_dir,\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)",
"def benchmark_xla_real_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.data_dir,\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)",
"def benchmark_xla_real_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.data_dir,\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)",
"def measure(x, y):\n return dotc_gpu(x, y)",
"def timing_test_gpu(f: Union[hessQuik.networks.NN, torch.nn.Module], x: torch.Tensor,\n num_trials: int = 10, clear_memory: bool = True):\n\n total_time = torch.zeros(num_trials + 1)\n for i in range(num_trials + 1):\n t1_start = time.perf_counter()\n f0, df0, d2f0 = f(x, do_gradient=True, do_Hessian=True)\n torch.cuda.synchronize()\n t1_stop = time.perf_counter()\n total_time[i] = t1_stop - t1_start\n\n if clear_memory:\n del f, x\n gc.collect()\n torch.cuda.empty_cache()\n\n return total_time[1:]",
"def test_cpu(self):\n cpu = CPUCyclesResource(128 * (2**20))\n self.assertEqual(cpu.get_value(), 128 * (2**20))\n cpu = CPUCyclesResource(128 * (2**19))\n self.assertEqual(cpu.get_value(), 128 * (2**19))\n cpu = CPUCyclesResource(128 * (2**21))\n self.assertEqual(cpu.get_value(), 128 * (2**21))",
"def benchmark_xla_fakedistort_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n distortions=True,\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)",
"def main():\n\n # Create an empty array to hold our points.\n n = gpuarray.zeros(shape=(x, y, z),\n dtype=gpuarray.vec.float3)\n\n # Populate the array with randomized points from the search space.\n for k in range(z):\n for j in range(y):\n for i in range(x):\n n[i, j, k] = gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-depth, depth))\n\n # Declare our elementwise CUDA kernel.\n mod = Elementwise(\n arguments=\"float3 pt, float3 *ns, float *rs\",\n operation=\"rs[i] = sqrt(pow(pt.x-ns[i].x,2)+pow(pt.y-ns[i].y,2)+pow(pt.z-ns[i].z,2))\",\n name=\"euclidean_distance\",\n preamble=\"#include <math.h>\"\n )\n\n # Declare an empty results array.\n r = gpuarray.zeros(shape=(50, 50, 2), dtype=numpy.float32)\n start = cuda.Event()\n end = cuda.Event()\n start.record()\n # Call the kernel with a randomize point from the search space.\n mod(gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-width, width)), n, r)\n end.record()\n end.synchronize()\n print((start.time_till(end)))\n print(r)",
"def get_cpu_usage(*args):\n \n keys = ['us', 'ni', 'sy', 'id', 'wa', 'hi', 'si', 'st'] #usage % to be returned\n \n with open('/proc/stat') as f1:\n with open('/proc/stat') as f2:\n content1 = f1.read() #first collection\n yield {} #yield so that caller can put delay before sampling again\n content2 = f2.read() #second collection\n \n cpu_count = multiprocessing.cpu_count() #total number of cpu cores available\n lines1, lines2 = content1.splitlines(), content2.splitlines()\n data, deltas = {}, {}\n \n #if only one cpu available, read only the first line, else read total cpu count lines starting from the second line\n i, cpu_count = (1, cpu_count + 1) if cpu_count > 1 else (0, 1)\n \n #extract deltas\n while i < cpu_count:\n line_split1 = lines1[i].split()\n line_split2 = lines2[i].split()\n deltas[line_split1[0]] = [int(b) - int(a) for a, b in zip(line_split1[1:], line_split2[1:])]\n i += 1\n \n for key in deltas:\n #calculate the percentage\n total = sum(deltas[key])\n data[key] = dict(zip(keys, [100 - (100 * (float(total - x) / total)) for x in deltas[key]]))\n \n yield data",
"def calculate_cpu_metric(data, code, ram):\n # max_data = 15000 # Matching an ideal state\n # max_code = 100 # Near enough to the maximum value to be an ideal state\n # max_ram = 128 # Less than the maximum, but reaches an ideal state\n #\n # data_met = (data / max_data).clip(min=0, max=1)\n # code_met = (code / max_code).clip(min=0, max=1)\n # ram_met = (ram / max_ram).clip(min=0, max=1)\n #\n # return np.abs((data_met + code_met + ram_met) / 3).clip(min=0, max=1)\n \"\"\"\n The above code was the old CPU metric in an attempt to calculate performance. As it is no longer utilised, and is\n simply a binary check for the presence of a flightboard.\n Totals is used to find if there is a positive amount of memory, which is present on all flightboards.\n It is simply the sum of any of the categories of memory.\n If the value is greater than 0, then it returns 1, else returns 0\n \"\"\"\n totals = data + code + ram\n if totals > 0:\n return 1\n else:\n return 0",
"def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())"
] | [
"0.7225633",
"0.695674",
"0.68218005",
"0.6769229",
"0.6725079",
"0.6654836",
"0.6625746",
"0.65144086",
"0.647302",
"0.63773113",
"0.63309896",
"0.6302064",
"0.6302064",
"0.6302064",
"0.6184083",
"0.61827475",
"0.6167184",
"0.6167184",
"0.6167184",
"0.6154827",
"0.6154827",
"0.6154827",
"0.6135636",
"0.6133293",
"0.6124068",
"0.6122334",
"0.61067015",
"0.60995007",
"0.6097636",
"0.6096586"
] | 0.7135301 | 1 |
Check bit exactness on interpolation executable between Gpu vs Cpu with various parameters. | def check_bit_exactness(input_raw_file):
(t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)
(t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)
(t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)
(t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)
if filecmp.cmp(f1, f2, shallow=True):
print("NN interpolation on GPU is bit exact with CPU")
if filecmp.cmp(f3, f4, shallow=True):
print("Bilinear interpolation on GPU is bit exact with CPU") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cpu_gpu_result(self, precision=1e-1):\n res1 = run_infer(self.model, CASE_ROOT + \"/resnet_fluid_gpu.yaml\",\n self.input_data)\n res2 = run_infer(self.model, CASE_ROOT + \"/resnet_fluid_cpu.yaml\",\n self.input_data)\n result1 = res1[0].data.float_data()\n result2 = res2[0].data.float_data()\n for i in range(len(result1)):\n tools.assert_almost_equal(result1[i], result2[i], delta=precision)",
"def _test(self):\n self.pytorch_layer.eval()\n pytorch_layer = copy.deepcopy(self.pytorch_layer).cpu()\n image_w_h = int(self.input_size ** 0.5)\n input_image = torch.rand(1, self.n_in_channels, image_w_h, image_w_h)\n output_tensor = pytorch_layer(input_image)[0]\n for channel in range(self.n_in_channels):\n current_channel = input_image[0, channel].squeeze().flatten().cpu().numpy()\n normalized_data = (current_channel - self.running_mean[channel]) / np.sqrt(\n self.running_var[channel] + self.epsilon\n )\n if self.affine:\n output_numpy = (self.weights[channel] * normalized_data) + self.bias[\n channel\n ]\n else:\n output_numpy = normalized_data\n\n assert np.isclose(\n output_numpy,\n output_tensor[channel].detach().flatten().cpu().numpy(),\n atol=1e-6,\n ).all()",
"def testMulRealConst(self):\n self.im8_1.fill(1)\n \n self.im8_3.fill(1)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=False)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_3.fill(2)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=True)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(10)\n self.im8_3.fill(15)\n mulRealConst(self.im8_1, 1.5, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im32_1.fill(1000)\n self.im32_3.fill(1500)\n mulRealConst(self.im32_1, 1.5, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(200)\n self.im8_3.fill(255)\n self.im32_3.fill(260)\n mulRealConst(self.im8_1, 1.3, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n mulRealConst(self.im8_1, 1.3, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)",
"def check_correctness_bc01(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False",
"def test_pressure_increasing_check_some_constants(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)",
"def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)",
"def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0",
"def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def check_correctness_channelwise(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 4\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False",
"def _gpu_and_random(self, exprs):\n if not GPU:\n return False\n if not all(tell_deterministic(i) for i in exprs):\n return True\n\n return False",
"def test_r():\n y, x = np.indices((10,20))\n\n default_use_numexpr = accel_math._USE_NUMEXPR\n\n accel_math._USE_NUMEXPR = True\n r1 = accel_math._r(x,y)\n\n accel_math._USE_NUMEXPR = False\n r2 = accel_math._r(x,y)\n\n np.testing.assert_almost_equal(r1,r2)\n\n accel_math._USE_NUMEXPR = default_use_numexpr",
"def test_binary_reg_fn():\n inputs = Variable(torch.Tensor([0, .5, 1]))\n outputs = binary_reg_fn(inputs).data\n expected = torch.Tensor([0.0029409, 1, 0.0029409])\n assert is_close(outputs, expected).all(), \\\n \"{} != {}\".format(outputs.tolist(), expected.tolist())",
"def check_sample_correctishness_bc01(f):\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n 
\"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"",
"def _compare(self, x,y, pr=False):\n batched = self.ex.batched(x, y)\n looped = self.ex.looped(x, y)\n #print(f'batched value {batched}')\n #print(f'looped value {looped}')\n \n self.assertTrue(\n torch.equal(batched, looped)\n )",
"def test_equal9():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([True, False, True])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def conditional_guard(src, dst):\n int64_count = 0\n float64_count = 0\n float16_count = 0\n if src in int64_types or dst in int64_types:\n int64_count = 1\n if src in float64_types or dst in float64_types:\n float64_count = 1\n if src in float16_types or dst in float16_types:\n float16_count = 1\n if float16_count > 0:\n print(\"#ifdef cl_khr_fp16\")\n if float64_count > 0:\n #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be\n print(\"#ifdef cl_khr_fp64\")\n return 1 + float16_count\n elif int64_count > 0:\n print(\"#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)\")\n return 1 + float16_count\n return float16_count",
"def test_get_r():\n\n assert get_r(100, 143, 255) == 100\n assert get_r(100, 143, 255) != 143\n assert get_r(100, 143, 255) != 255",
"def test_binary_hamming_distance_dtype_cpu(self, inputs, dtype):\n preds, target = inputs\n if (preds < 0).any() and dtype == torch.half:\n pytest.xfail(reason=\"torch.sigmoid in metric does not support cpu + half precision\")\n self.run_precision_test_cpu(\n preds=preds,\n target=target,\n metric_module=BinaryHammingDistance,\n metric_functional=binary_hamming_distance,\n metric_args={\"threshold\": THRESHOLD},\n dtype=dtype,\n )",
"def is_perfect_square():",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def testKnown(self):\n numAmps = (2, 2)\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(4, 4))\n # make a 4x4 image with 4 identical 2x2 subregions that flatten to -1, 0, 1, 2\n im = afwImage.ImageF(bbox)\n imArr = im.getArray()\n imArr[:, :] = np.array(((-1, 0, -1, 0),\n (1, 2, 1, 2),\n (-1, 0, -1, 0),\n (1, 2, 1, 2)), dtype=imArr.dtype)\n\n sqCoeffs = np.array(((0, 0.11), (-0.15, -12)))\n detector = self.makeDetector(bbox=bbox, numAmps=numAmps, sqCoeffs=sqCoeffs)\n ampInfoCat = detector.getAmpInfoCatalog()\n\n linSq = LinearizeSquared()\n linSq(im, detector=detector)\n\n # amp 0 has 0 squared coefficient and so makes no correction\n imArr0 = im.Factory(im, ampInfoCat[0].getBBox()).getArray()\n linCoeff0 = ampInfoCat[0].getLinearityCoeffs()[0]\n self.assertEqual(0, linCoeff0)\n self.assertFloatsAlmostEqual(imArr0.flatten(), (-1, 0, 1, 2))\n\n # test all amps\n for ampInfo in ampInfoCat:\n imArr = im.Factory(im, ampInfo.getBBox()).getArray()\n linCoeff = ampInfo.getLinearityCoeffs()[0]\n expect = np.array((-1 + linCoeff, 0, 1 + linCoeff, 2 + 4*linCoeff), dtype=imArr.dtype)\n self.assertFloatsAlmostEqual(imArr.flatten(), expect)",
"def test_verify_state_of_a_device():",
"def check_result(tflite_results, tvm_results):\n\n #\n # MNIST quantized uint8 results in one single difference of\n # ~ 0.004 so just escape this\n #\n ATOL = 1e-3\n RTOL = 0.5\n\n tvm.testing.assert_allclose(tflite_results, tvm_results, rtol=RTOL, atol=ATOL)",
"def test_change_brightness_of_the_devicetrue():",
"def test_conditional_2bit(self):\n shots = 100\n circuits = ref_conditionals.conditional_circuits_2bit(final_measure=True)\n targets = ref_conditionals.conditional_counts_2bit(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed",
"def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)",
"def test_compare_single_and_multiprocess_results(sidesweep_image_sequence):\n cc = Cwsim_container_from_ims(ims=sidesweep_image_sequence)\n test_idx = 20\n results_single_proc = cc.self_im_query_all(test_idx, plot_output=False)\n\n test_im = sidesweep_image_sequence[test_idx]\n cc.prepare_memory_bank_outside()\n results_multi_proc = cc.query_image_mp(test_im)\n\n plt.plot(results_single_proc, marker='D', label='single_proc')\n plt.plot(results_multi_proc, marker='D', label='multi_proc')\n plt.legend()\n plt.draw()\n plt.show(block=False)\n plt.pause(3)\n\n assert np.allclose(results_single_proc, results_multi_proc)",
"def test_basic_calculation(self):\n expected_result = np.array(\n [\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n ],\n dtype=np.float32,\n )\n result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)\n self.assertArrayAlmostEqual(result.data, expected_result)\n self.assertTrue(result.dtype == np.float32)"
] | [
"0.640324",
"0.56350285",
"0.5585213",
"0.55641",
"0.5548312",
"0.55470926",
"0.5535607",
"0.5483092",
"0.547579",
"0.5412719",
"0.53335917",
"0.533129",
"0.53188753",
"0.5310341",
"0.53064054",
"0.53023964",
"0.53012884",
"0.529403",
"0.5288582",
"0.5288348",
"0.5280955",
"0.52719754",
"0.5260428",
"0.5251597",
"0.5251099",
"0.52463526",
"0.5241372",
"0.5233511",
"0.5232092",
"0.5223887"
] | 0.7583435 | 0 |
Setup an example users generator instance so can use the record | def setUp(self):
gen = UsersGenerator({})
gen.generate_adt_user()
self.record = gen.class_data.findall('record')[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n\n # Allocates users\n self.users = []\n self.user_session_tokens = []\n\n # Template for creating users\n user_template = {\n \"clientId\": 2,\n \"username\": \"user\",\n \"pwd\": \"password\",\n \"nameLast\": \"Last\",\n \"nameFirst\": \"First\",\n \"email\": \"[email protected]\",\n \"phone\": \"123-4567\",\n \"profile_picture_path\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Creates 'n' users and stores them\n n = 3\n for i in range(0, n):\n user = deepcopy(user_template)\n user['username'] += randstr()\n user['email'] += randstr()\n handler.user_create(event=user, context=None)\n self.users.append(user)\n self.user_session_tokens.append(None)",
"def sample_user(self):",
"def setUp(self):\n self.new_users = User(\"Zephon Makale\", \"1234xyz\") #Create User object",
"def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]",
"def setUp(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')",
"def setUp(self):\n patientgen = PatientsGenerator(0, 1, 0, 'a')\n self.record = patientgen.data.find('record')\n self.gender_sex = patientgen.gender_sex_list\n self.ethnicities = patientgen.ethnicity_list\n # self.female_names = patientgen.data_generator.first_names_female\n # self.male_names = patientgen.data_generator.first_names_male\n # self.last_names = patientgen.data_generator.last_names",
"def setUp(self):\n\n self.user_1 = User.objects.create_user(\n first_name=\"John\",\n last_name=\"Kenedy\",\n username=\"johnny\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )\n self.user_2 = User.objects.create_user(\n first_name=\"Kent\",\n last_name=\"Philip\",\n username=\"kenty\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )",
"def setUp(self):\n \n self.new_user = User_prof(username = 'munga',bio = 'funny thing to say')",
"def setUp(self):\n \n self.new_user = User_prof(username = 'munga',bio = 'funny thing to say')",
"def create_fake_data():\n User.create_fake_users()",
"def setUpClass(cls):\n super(EmotionTest, cls).setUpClass()\n user = UserFactory(username='dan', email='[email protected]')\n user.set_password('password')\n user.first_name = 'Dan'\n user.last_name = 'Theman'\n user.save()\n cls.dan = user\n\n for _ in range(10):\n user = UserFactory.create()\n user.set_password(factory.Faker('password'))\n user.save()",
"def setUp(self):\n User.users = {}\n self.app = User('[email protected]', 'admin', 'admin')\n # Set some default user data\n self.user_data = {\n 1: {\n 'email': '[email protected]',\n 'username': 'admin',\n 'password': 'admin' \n }\n \n }",
"def _create_random_user(self,startname=\"\",site=None):\n \n username = startname + \"\".join([choice('AEOUY')+\n choice('QWRTPSDFGHHKLMNB')\n for x in range(3)])\n \n data = {'username':username,\n 'email':username+\"@test.com\"}\n \n return self._create_user(data,site)",
"def setUp(self):\n self.new_user = User(\"Juma\",\"12345\")",
"def setUp(self):\n self.user_1 = User()",
"def setUp(self):\n self.new_user = User.objects.create_user(first_name='John', last_name='Doe', username='john_doe', email='[email protected]', bio='I am new here.', password='test_password', website='example.com', social_media={\n 'facebook':'Facebook link',\n 'Dribble': 'Dribble link',\n })",
"def example_data():\n\n User.create_user(\"Kate\", \"longpass\", None)\n User.create_user(\"Long\", \"regularpass\", None)\n User.create_user(\"Critter\", \"shortpass\", None)",
"def users_create():",
"def generator_setup():\n PaaSPureGenerator()",
"def setUp(self):\n\n self.user = self.client.users.create({})",
"def setUp(self):\n user = Users.query.first()",
"def setUp(self):\n users = []\n users.append(user.User(username=\"username\", name=\"name\", email=\"[email protected]\", password_hash=\"password_hash\", salt=\"salt\", profile_picture=b\"profile_picture\"))\n users.append(user.User(username=\"test\", password_hash=\"iiojfeaioieof\", salt=\"saltySalt\"))\n users.append(user.User(username=\"jeff\", name=\"jeff bob\", password_hash=\"eeeeeeeeeeeeeee\", salt=\"fffffffffffffff\"))\n users.append(user.User(username=\"epicUsername69\", email=\"[email protected]\", password_hash=\"asdfafeadf\", salt=\"graefgafae\"))\n db.create_all()\n for value in users:\n db.session.add(value)\n db.session.commit()",
"def init():\n create_user(app)\n get_all_user()",
"def setUp(self):\n self.new_user = User(\"Hamisi\",\"python\")",
"def setUp(self):\r\n\t\tself.u1 = User.objects.create(username='Gabby')\r\n\t\tself.u1.profile.bio = \"I'm a female profile with inserted components\"\r\n\t\tself.u1.profile.birth_date = datetime.now()\r\n\t\tself.u1.profile.gender = 'female'\r\n\t\tself.u1.profile.save()",
"def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')",
"def setUp(self):\n self.validator = Validator()\n self.users = Users()",
"def test_specific_user(global_config, test_specific_email, id_api):\n yield id_api.create_user_if_not_exists(test_specific_email, global_config.users.default.password)",
"def setUp(self):\n super(TestCaseWithUsers, self).setUp()\n\n # Creating users\n self.password = 'password1'\n\n UserData = namedtuple('UserData', 'email first_name last_name')\n\n users_data = [\n UserData('[email protected]', 'Some', 'User'),\n UserData('[email protected]', 'Some', 'Admin'),\n UserData('[email protected]', 'Another', 'User'),\n UserData('[email protected]', 'Another', 'Admin'),\n ]\n\n for idx, user_data in enumerate(users_data, start=1):\n attr_name = 'user{}'.format(idx)\n\n self.__setattr__(attr_name, User.objects.create_user(\n first_name=user_data.first_name,\n last_name=user_data.last_name,\n email=user_data.email,\n password=self.password,\n ))",
"def setUp(self):\n self.new_user = User('Valentine', 'Robai', '0712345678', '[email protected]', 'vrobai',\n 'password')"
] | [
"0.6956374",
"0.6918225",
"0.686735",
"0.6851119",
"0.67789096",
"0.67656577",
"0.66330135",
"0.6540279",
"0.6540279",
"0.6522282",
"0.6480351",
"0.64449173",
"0.64280057",
"0.63684684",
"0.6364395",
"0.63467336",
"0.63422114",
"0.63228506",
"0.6303323",
"0.63001335",
"0.62836623",
"0.6282175",
"0.627899",
"0.625981",
"0.625553",
"0.6238446",
"0.6231708",
"0.62283593",
"0.62183464",
"0.61855143"
] | 0.780119 | 0 |
Fetch organization details from the API. | def fetch_details_from_api(self, org_names=None):
logger.debug('Fetching org details from API...')
details = {}
if org_names is None:
org_names = self._all_page_names(without_namespace=True)
for org in org_names:
code = self._code_by_name(org)
if code is None:
continue
data = self._data_by_code(code)
if data is None:
continue
details[org] = data
# Replace parent code with parent name (preferredLabel)
parent_code = details[org].get('subOrganizationOf')
if parent_code:
parent_name = self._name_by_code(parent_code)
if parent_name is None:
parent_name = ''
details[org]['subOrganizationOf'] = parent_name
purpose_ids = details[org].get('purpose')
# Replace purpose ids with purpose (function) names
if purpose_ids:
details[org]['purpose'] = ','.join([
self._purpose_by_id[id_] for id_ in purpose_ids])
# Replace status with greek translation
status = details[org].get('status')
if status:
details[org]['status'] = self.STATUS_TRANSLATION[status]
# Replace type id with type name
type_id = details[org].get('organizationType')
if type_id:
details[org]['organizationType'] = self._type_by_id[type_id]
logger.debug(f'{org} - fetched details')
logger.debug('Fetched org details.')
return details | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_organization(organization):\n return fetch_json(organization_url, organization)",
"def test_get_organization(self):\n pass",
"def test_retrieve_l_organization(self):\n pass",
"def test_get_organization_from_api_key(self):\n pass",
"def get_organization(self, id: str) -> dict[str, Any]:\n params = {}\n\n return self.client.get(self._url(id), params=params)",
"async def get_organization(request: Request, org: str):\n\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n return {org: organizations_obj[org]}",
"def org_info(self):\n\n response = self.postman.request('info')\n\n if (response.status_code == requests.codes.ok):\n data = response.json()\n\n self.repos = data['public_repos']\n self.created = data['created_at']\n self.updated = data['updated_at']\n\n self.repo_info()\n self.member_info()",
"def get_organization_details(self):\n\n # Returns 1) OU Name to OU ID mapping (dict)\n # key: OU Name (in the manifest); value: OU ID (at root level)\n # 2) all OU IDs under root (dict)\n org = Organizations(self.logger)\n all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)\n\n # Returns 1) active accounts (list) under an OU.\n # use case: used to validate accounts in the manifest file\n # 2) Accounts for each OU at the root level.\n # use case: map OU Name to account IDs\n # key: OU ID (str); value: Active accounts (list)\n accounts_in_all_ous, ou_id_to_account_map = \\\n self._get_accounts_in_ou(org, all_ou_ids)\n\n # Returns account name in manifest to account id mapping.\n # key: account name; value: account id\n name_to_account_map = self.get_account_for_name(org)\n\n return accounts_in_all_ous, ou_id_to_account_map, \\\n ou_name_to_id_map, name_to_account_map",
"def organization(self):\n return self._tower.get_organization_by_id(self._data.get('organization'))",
"def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))",
"def find_organization(self):\n if self.org_id is not None:\n ItopapiPrototype.get_itop_class('Organization').find(self.org_id)\n return None",
"def get_org_data(org, session=None):\n url = f'{GITHUB_API_URL}/orgs/{org}'\n return get_whole_response_as_json(url, session)",
"def get(self, organization_id):\n if organization_id is None:\n # Expose a list of organizations\n organizations = Organization.get_all()\n if organizations is None:\n abort(404)\n if request.args.get('name'):\n # search by name\n org_name = request.args.get('name')\n results = db.session.query(Organization).filter(\n Organization.name.ilike('%{0}%'.format(org_name)))\n organizations = results\n\n response = []\n for org in organizations:\n response.append(org.serialize())\n\n return make_response(jsonify(response)), 200\n\n else:\n # Expose a single organization\n try:\n organization = Organization.query.filter_by(\n id=organization_id).first()\n if not organization:\n abort(404)\n else:\n try:\n response = organization.serialize()\n return make_response(jsonify(response)), 200\n except Exception as e:\n response = {\n \"message\": str(e)\n }\n return make_response(jsonify(response)), 400\n except Exception as e:\n abort(404)",
"def get_organization(self):\n return self.reference[REF_ORGANIZATION][REF_VALUE]",
"def GetOrganization(**argd):\n flag, ret = CGateway.core.GetOrganizationName(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse({'return': ret})",
"def test_retrieve_l_organizations(self):\n pass",
"def organization_get_no_login(self, client, id):\n assert client.get('/organizations/' + id).status == '400 BAD REQUEST'",
"def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text",
"def get_organizations(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Organizations\":\n\n return communicator.Organizations(self.__requester).fetch(parameters=params)",
"def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='[email protected]',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)",
"def get_org(self, retry_on_rate_exceed=False):\n return Org.deserialize(self._get_raw('org', {}, retry_on_rate_exceed))",
"def get_organization_by_name(self, name: str | None = None) -> dict[str, Any]:\n params = {}\n\n return self.client.get(self._url(\"name\", name), params=params)",
"def _get_org(self, context, org):\r\n try:\r\n rtn = {'context': context,\r\n 'org': org,\r\n 'space': self._bbreader.cache[context][org]['space'],\r\n 'org_config': self._bbreader.cache[context][org]['org'],\r\n }\r\n except KeyError:\r\n raise RequestError('No such context/org: {}/{}'.format(context, org))\r\n\r\n return rtn",
"def get_organization_by_id_with_http_info(self, organization_id, **kwargs):\n\n all_params = ['organization_id', 'organizations']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_organization_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'organization_id' is set\n if ('organization_id' not in params) or (params['organization_id'] is None):\n raise ValueError(\"Missing the required parameter `organization_id` when calling `get_organization_by_id`\")\n\n resource_path = '/organizations/{organization-ID}'.replace('{format}', 'json')\n path_params = {}\n if 'organization_id' in params:\n path_params['organization-ID'] = params['organization_id']\n\n query_params = {}\n if 'organizations' in params:\n query_params['organizations'] = params['organizations']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['text/plain'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OrganizationPagedMetadata',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))",
"def get_organization(\n self, organization_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Organization\":\n\n return communicator.Organization(self.__requester).from_id(\n organization_id=organization_id, parameters=params\n )",
"def test_get_cloud_organization_api_key(self):\n pass",
"def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")",
"def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")",
"async def getOwnerInfo(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ConfigurationValidator.getOwnerInfo()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getOwnerInfo\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getOwnerInfo\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/configuration/v1.0/about\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)",
"def test_get_test_organization_api_key(self):\n pass"
] | [
"0.82742935",
"0.7215253",
"0.71555364",
"0.71401477",
"0.70368063",
"0.6870949",
"0.6747574",
"0.67154455",
"0.6695743",
"0.66767126",
"0.66356957",
"0.6629043",
"0.65509844",
"0.6486175",
"0.64638036",
"0.64497346",
"0.64293605",
"0.6352869",
"0.6321579",
"0.62873846",
"0.61520344",
"0.6145669",
"0.6135703",
"0.6129415",
"0.6095164",
"0.606598",
"0.6055175",
"0.6026501",
"0.60254073",
"0.6004049"
] | 0.7452373 | 1 |
Create new organization category tree and pages. | def recreate_tree(self, fetch_from_api=False):
logger.debug('Creating organization category tree and pages...')
for parent, children in self._hierarchy(
fetch_from_api=fetch_from_api).items():
self._create_pages(parent)
parent_category = f'[[Category:{parent}]]'
for child in children:
self._create_pages(
child, parent_category=parent_category)
logger.debug('Done.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_category_pages(app):\n env = app.builder.env\n\n template = \"category.html\"\n\n categories = env.categories\n for name, category in categories.iteritems():\n context = {}\n context[\"title\"] = category.name\n context[\"subcategories\"] = category.subcategories\n context[\"pages\"] = category.pages\n\n yield (name, context, template)",
"def create_category_pages(app):\n env = app.builder.env\n # jinja2 html template\n template = CATEGORY_PAGE_TEMPLATE\n\n categories = env.categories\n for name, category in categories.iteritems():\n context = {}\n # First write out the named page\n context[\"title\"] = category.name\n\n #get parent category\n if \"\\\\\" in category.name:\n categs = category.name.split(\"\\\\\")\n categs.pop()\n parent_category = r\"\\\\\".join(categs)\n parent_category_link = \"../\" + categs[-1] + \".html\"\n parent_category = \"<b>Category:</b> <a href='{0}'>{1}</a>\"\\\n .format(parent_category_link,parent_category)\n context[\"parentcategory\"] = parent_category\n\n # sort subcategories & pages alphabetically\n context[\"subcategories\"] = sorted(category.subcategories, key = lambda x: x.name)\n context[\"pages\"] = sorted(category.pages, key = lambda x: x.name)\n context[\"outpath\"] = category.html_path\n\n #jinja appends .html to output name\n category_html_path_noext = os.path.splitext(category.html_path)[0]\n yield (category_html_path_noext, context, template)\n\n # Now any additional index pages if required\n if category.name in INDEX_CATEGORIES:\n # index in categories directory\n category_html_dir = os.path.dirname(category.html_path)\n category_html_path_noext = category_html_dir + \"/index\"\n yield (category_html_path_noext, context, template)\n\n # index in document directory\n document_dir = os.path.dirname(category_html_dir)\n category_html_path_noext = document_dir + \"/index\"\n context[\"outpath\"] = category_html_path_noext + \".html\"\n yield (category_html_path_noext, context, template)",
"def create_hierarchy(self):\n\t\tpass",
"def nuke_tree(self):\n logger.debug('Nuking organization category tree and pages...')\n\n def recurse_delete(page):\n if page.exists:\n page_is_category = True\n try:\n page_members = page.members()\n except AttributeError:\n # page is not a category (no members)\n page_is_category = False\n else:\n # page is a category\n for member in page_members:\n recurse_delete(member)\n finally:\n if page_is_category or page.name.startswith(\n self.NAMESPACE):\n page.delete()\n logger.debug(f'{page.name} deleted.')\n root_category_page = self._site.categories[self.CATEGORY_NAME]\n for page in root_category_page.members():\n recurse_delete(page)\n logger.debug('Done.')",
"def create(self, validated_data):\n category_data = validated_data.pop('categories')\n p = Page.objects.create(**validated_data)\n for c in category_data:\n cat = Category.objects.filter(title=c['title'])\n if cat:\n p.categories.add(cat[0])\n else:\n cat = Category.objects.create(**c)\n p.categories.add(cat)\n p.save()\n return p",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def create_catalog_hierarchy(self, *args, **kwargs):\n # Patched in by [email protected], Jul 23, 2014, added by birdland to template on Aug 8, 2014\n # Is not part of specs for catalog hierarchy design sessions, but may want to be in hierarchy service instead\n # Will not return an actual object, just JSON\n # since a BankHierarchy does not seem to be an OSID thing.\n return self._get_provider_session('catalog_hierarchy_design_session').create_catalog_hierarchy(*args, **kwargs)",
"def html_collect_pages(app):\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)",
"def createFolderStructure(self):\n\n\t\twith open(self.data_path + 'categories.csv', 'rb') as csvfile:\n\t\t\treader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\t\t\tnext(reader, None) # skip the headers\n\t\t\tfor row in reader:\n\t\t\t\tdirectory = self.data_path + 'categories/' + str(row[1])\n\t\t\t\tif not os.path.exists(directory):\n\t\t\t\t\tos.makedirs(directory)",
"def html_collect_pages(app):\n\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)",
"def test_create_category(self):\n pass",
"def create(self, org_name, org_children=None, suffix=None):\n dn = self.org_attr + '=' + org_name + ',' + self.base_dn\n if org_children is None:\n org_children = self.org_children\n if suffix is None:\n dn_attr = {'objectClass': ['top', self.org_class],\n self.org_attr: [org_name]}\n else:\n dn_attr = {'objectClass': ['top', self.org_class, self.user_class],\n self.org_attr: [org_name],\n self.org_suffix_attr: [suffix]}\n dn_info = [(k, v) for (k, v) in dn_attr.items()] \n msg = 'Creating %s with attributes %s' % (dn, dn_info)\n self.log.debug(msg)\n result = self._create_object(dn, dn_info) \n # Add any children\n for child_name in org_children:\n child = SpokeOrgChild(org_name)\n child.create(child_name)\n filter = '%s=%s' % (self.org_attr, org_name)\n self.log.debug('Result: %s' % result)\n return result",
"def testCreateOrg(self):\n self.timeline.orgSignup()\n self.data.createProfile()\n self.record.createOrgApp('new_org', self.data.user)\n\n url = '/gci/profile/organization/' + self.gci.key().name()\n create_url = url + '?org_id=new_org'\n response = self.get(create_url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n \n postdata = {\n 'founder': self.data.user, 'home': self.createDocument().key(),\n 'scope': self.gci, 'irc_channel': 'irc://example.com',\n 'pub_mailing_list': 'http://example.com',\n }\n response, properties = self.modelPost(create_url, GCIOrganization, postdata)\n self.assertResponseRedirect(response, url + '/new_org?validated')\n profile = db.get(self.data.profile.key())\n self.assertEqual(1, len(profile.org_admin_for))",
"def create_project(self,*pages,config_folder = \"config\",FunctionBased = False):\n\n self._make_initial_directories()\n self._make_initial_files(*pages,FunctionBased = FunctionBased)",
"def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category",
"def _create_links_and_track(self, page_name, category_list):\n env = self.state.document.settings.env\n if not hasattr(env, \"categories\"):\n env.categories = {}\n\n link_rst = \"\"\n ncategs = 0\n for categ_name in category_list:\n #categ_name is the full category name - register that\n category = self.register_category(categ_name, env)\n category.pages.add(PageRef(page_name, env.docname))\n\n #now step up a step up each time the category hierarchy\n parent_category = categ_name\n while True:\n if r\"\\\\\" in parent_category:\n categs = parent_category.split(r\"\\\\\")\n else:\n break\n # remove the last item\n subcat = Category(categ_name, env.docname) #create the category with the full name\n subcat.name=categs.pop() # and then replace it with the last token of the name\n parent_category = r\"\\\\\".join(categs)\n\n #register the parent category\n parent = self.register_category(parent_category, env)\n parent.subcategories.add(subcat)\n\n # endwhile\n\n #category should be the last subcategory by this point\n link_rst += \"`%s <%s>`_ | \" % (categ_name, category.link(env.docname))\n ncategs += 1\n # endfor\n\n link_rst = \"**%s**: \" + link_rst.rstrip(\" | \") # remove final separator\n if ncategs == 1:\n link_rst = link_rst % \"Category\"\n else:\n link_rst = link_rst % \"Categories\"\n #endif\n\n return link_rst",
"def create_site_structure(root, structure):\n for item in structure:\n id = item['id']\n title = item['title']\n description = item.get('description', u'')\n if id not in root:\n if 'creators' not in item:\n item['creators'] = CREATORS\n obj = api.content.create(root, **item)\n # publish private content or make a workflow transition\n if item['type'] not in ['Image', 'File']:\n if '_transition' not in item and api.content.get_state(obj) == 'private':\n api.content.transition(obj, 'publish')\n elif item.get('_transition', None):\n api.content.transition(obj, item['_transition'])\n # constrain types in folder?\n if '_addable_types' in item:\n constrain_types(obj, item['_addable_types'])\n # the content has more content inside? create it\n if '_children' in item:\n create_site_structure(obj, item['_children'])\n # add an image to all news items\n if obj.portal_type == 'News Item':\n if 'image' in item:\n obj.setImage(item['image'])\n # set the default view to object\n if '_layout' in item:\n obj.setLayout(item['_layout'])\n # XXX: workaround for https://github.com/plone/plone.api/issues/99\n obj.setTitle(title)\n obj.setDescription(description)\n obj.reindexObject()\n logger.debug(u' {0} criado e publicado'.format(title))\n else:\n logger.debug(u' pulando {0}; conteúdo existente'.format(title))",
"def post(self):\n\n args = category_parser.parse_args()\n \n category = db.categories.Category()\n category.name = args['name']\n category.description = args['description']\n category.is_hidden = args['is_hidden']\n category.order = args['order']\n # TODO: check IDs\n # TODO: flask-restful doesn't create arg if it's not in request\n if args['items_order'] is None:\n args['items_order'] = []\n category.items_order = args['items_order']\n \n parent = None\n # chech ID for parent\n if 'parent' in args and args['parent'] is not None:\n parent = db.categories.Category.find_one({\"_id\": ObjectId(args['parent'])})\n if parent is not None:\n category.parent = parent['_id']\n \n category.save()\n \n return category, 201",
"def create_permissions_for_organization(self, organization):\n course_page_role = self.create_page_role()\n organization_page_role = organization.create_page_role()\n\n if organization_page_role is None or course_page_role is None:\n return\n\n # - Create DjangoCMS page permissions\n PagePermission.objects.get_or_create(\n group_id=organization_page_role.group_id,\n page_id=self.extended_object_id,\n defaults=defaults.ORGANIZATION_ADMIN_ROLE.get(\n \"courses_page_permissions\", {}\n ),\n )\n\n # - Create the Django Filer folder permissions\n FolderPermission.objects.get_or_create(\n group_id=organization_page_role.group_id,\n folder_id=course_page_role.folder_id,\n defaults=defaults.ORGANIZATION_ADMIN_ROLE.get(\n \"courses_folder_permissions\", {}\n ),\n )",
"def _createOrganizationsCollections(folder):\n collections = [\n {'id': 'all_orgs', 'tit': _('all_orgs'), 'subj': (u'search', ), 'query': [\n {'i': 'portal_type',\n 'o': 'plone.app.querystring.operation.selection.is',\n 'v': ['organization']}],\n 'cond': u\"\", 'bypass': [],\n 'flds': (u'select_row', u'org_pretty_link_with_additional_infos',\n u'SelectedInPlonegroupColumn', u'PloneGroupUsersGroupsColumn',\n u'review_state', u'CreationDate', u'actions'),\n 'sort': u'sortable_title', 'rev': False, 'count': False},\n ]\n _createDashboardCollections(folder, collections)",
"def __init__(self, name, docname):\n\n if \"\\\\\" in docname:\n docname = docname.replace(\"\\\\\", \"/\")\n dirpath, filename = os.path.split(docname)\n html_dir = dirpath + \"/\" + CATEGORIES_DIR\n self.html_path = html_dir + \"/\" + name.replace(\"\\\\\\\\\", \"/\") + \".html\"\n super(Category, self).__init__(name, self.html_path)\n self.pages = set([])\n self.subcategories = set([])",
"def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)",
"def create_page_tree(parent_kwargs=None):\n root = PageFactory(title__title=\"Root\")\n parent = PageFactory(\n title__title=\"Parent\", parent=root, **(parent_kwargs or {})\n )\n page = PageFactory(title__title=\"Uncle\", parent=root)\n PageFactory(title__title=\"Page\", parent=parent)\n PageFactory(title__title=\"Sibling\", parent=parent)\n return root, parent, page",
"def create_page(self):",
"def get_categories(self):\n\n self.search([]).unlink()\n token = self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_categories',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n for req in request:\n try:\n self.create({\n 'category_id': req['id'],\n 'name': req['name'],\n 'description': req['description'],\n 'category_parent': req['parent'],\n })\n except Exception:\n print('Category not created')",
"def create_folder(self):\n cur_dir=os.getcwd()\n unique=False\n dirlist= [item for item in os.listdir(cur_dir) if os.path.isdir(os.path.join(cur_dir,item))]\n folder_name='taxonomy_{}_{}'.format(self.place,self.year)\n j=1\n while not unique:\n if folder_name in dirlist:\n folder_name='taxonomy_{}_{}({})'.format(self.place,self.year,str(j))\n j+=1\n else:\n unique=True\n new_folder=os.path.join(cur_dir,folder_name)\n os.mkdir(new_folder)\n os.chdir(new_folder)\n return folder_name",
"def create_category(self, category):\n\n super().new_entry()\n\n return Categories.objects.create(\n name=category['id'].split(':')[1],\n name_fr=category['name'],\n url=category['url']\n )",
"def show_categories(self):\n cat_model = TreeModel(('Categories', ))\n self.categoriesView.setModel(cat_model)\n\n categories = self.orm.fetch_parents()\n for category in categories:\n item = TreeItem(category, cat_model.rootItem)\n cat_model.rootItem.appendChild(item)\n\n subs = self.orm.fetch_subcategories_for_parent(category)\n\n for sub in subs:\n sub_item = TreeItem(sub, item)\n item.appendChild(sub_item)\n\n self.categoriesView.expandAll()",
"def categories_menu():\n categories = ['EU-affairs', 'Economy', 'Security', 'Society', 'World']\n\n for category in categories:\n url = build_url({'mode': 'Topic', 'foldername': category, 'page': 1})\n li = xbmcgui.ListItem(category, iconImage='DefaultFolder.png')\n xbmcplugin.addDirectoryItem(handle=ADDON_HANDLE, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(ADDON_HANDLE)",
"def search_and_store_graph (self, category, subcategory_depth, parent_node, include_pages):\n\n title = category if category.startswith('Category:') else 'Category:' + category\n\n #errore, importare ewlinks table\n category_url = ('https://en.wikipedia.org/wiki/' + category.replace(\" \", \"_\"))\n\n # indent based on the depth of the category: visualisation problems may occur if max_depth is not >>\n # subcategory_depth * 2\n print(\" \" * ((MAX_DEPHT) - (subcategory_depth * 2)) + category + \" URL: \" + category_url)\n\n # adding the category to the graph\n category_node = category_url\n\n self.category_graph.add_node(title, type='cat')\n if parent_node != 'null':\n self.category_graph.add_edge(parent_node, title)\n\n new_parent_node = title\n\n # =========Adding the pages to the categories, if required (generates a very large graph)====\n\n if include_pages:\n\n query = 'SELECT cl_from FROM categorylinks WHERE cl_type =\"page\" AND cl_to=\"' + (category[9:][0:]).replace(\n \" \", \"_\") + '\\\"'\n page_results = self.connection_db.query_request(query)\n for page_result in page_results:\n query = \"SELECT page_title FROM page WHERE page_id=\" + str(page_result[0])\n title_result = self.connection_db.query_request(query)\n try:\n page_title = str(title_result[0][0], 'utf-8')\n page_url = 'https://en.wikipedia.org/wiki/' + page_title\n page_node = \"Page:\" + page_title\n print(\" \" * (MAX_DEPHT - (\n (subcategory_depth - 1) * 2)) + \"Page title: \" + page_title + \" URL: \" + page_url)\n self.category_graph.add_node(page_node, type='pag')\n\n self.category_graph.add_edge(new_parent_node, page_node)\n except IndexError:\n print(\" \" * (MAX_DEPHT - ((subcategory_depth - 1) * 2)) + \"Document whit page id:\" + (\n str(title_result)[1:-2]) + \" Not found!\")\n\n # =======Adding and exploring the subcategories===\n if subcategory_depth > 0:\n\n search_title = (category[9:]).replace(\" \", \"_\")\n query = 'SELECT cl_from FROM categorylinks WHERE cl_type =\"subcat\" AND cl_to=\"' + search_title + '\\\"'\n subcat_results = self.connection_db.query_request(query)\n for subcat_result in subcat_results:\n query = \"SELECT page_title FROM page WHERE page_id=\" + str(subcat_result[0])\n result = self.connection_db.query_request(query)\n try:\n result = 'Category:' + str(result[0][0], 'utf-8')\n self.search_and_store_graph(result, subcategory_depth - 1, new_parent_node, include_pages)\n except IndexError:\n print(\" \" * (MAX_DEPHT - ((subcategory_depth - 1) * 2)) + \"Document whit page id:\" + (\n str(subcat_result)[1:-2]) + \" Not found!\")"
] | [
"0.6847534",
"0.67013305",
"0.63725936",
"0.6334025",
"0.6069195",
"0.59774697",
"0.5846274",
"0.5729139",
"0.57264924",
"0.57257324",
"0.5705253",
"0.5705121",
"0.5620174",
"0.56092304",
"0.5568929",
"0.55641246",
"0.5545612",
"0.553405",
"0.5525433",
"0.55243224",
"0.5514295",
"0.5503247",
"0.545458",
"0.5449532",
"0.5435681",
"0.5419766",
"0.5419564",
"0.5408297",
"0.5402214",
"0.5385694"
] | 0.7765683 | 0 |
Nuke organization category tree and pages. | def nuke_tree(self):
logger.debug('Nuking organization category tree and pages...')
def recurse_delete(page):
if page.exists:
page_is_category = True
try:
page_members = page.members()
except AttributeError:
# page is not a category (no members)
page_is_category = False
else:
# page is a category
for member in page_members:
recurse_delete(member)
finally:
if page_is_category or page.name.startswith(
self.NAMESPACE):
page.delete()
logger.debug(f'{page.name} deleted.')
root_category_page = self._site.categories[self.CATEGORY_NAME]
for page in root_category_page.members():
recurse_delete(page)
logger.debug('Done.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recreate_tree(self, fetch_from_api=False):\n logger.debug('Creating organization category tree and pages...')\n for parent, children in self._hierarchy(\n fetch_from_api=fetch_from_api).items():\n self._create_pages(parent)\n parent_category = f'[[Category:{parent}]]'\n for child in children:\n self._create_pages(\n child, parent_category=parent_category)\n logger.debug('Done.')",
"def show_categories(self):\n cat_model = TreeModel(('Categories', ))\n self.categoriesView.setModel(cat_model)\n\n categories = self.orm.fetch_parents()\n for category in categories:\n item = TreeItem(category, cat_model.rootItem)\n cat_model.rootItem.appendChild(item)\n\n subs = self.orm.fetch_subcategories_for_parent(category)\n\n for sub in subs:\n sub_item = TreeItem(sub, item)\n item.appendChild(sub_item)\n\n self.categoriesView.expandAll()",
"def create_category_pages(app):\n env = app.builder.env\n\n template = \"category.html\"\n\n categories = env.categories\n for name, category in categories.iteritems():\n context = {}\n context[\"title\"] = category.name\n context[\"subcategories\"] = category.subcategories\n context[\"pages\"] = category.pages\n\n yield (name, context, template)",
"def categories_menu():\n categories = ['EU-affairs', 'Economy', 'Security', 'Society', 'World']\n\n for category in categories:\n url = build_url({'mode': 'Topic', 'foldername': category, 'page': 1})\n li = xbmcgui.ListItem(category, iconImage='DefaultFolder.png')\n xbmcplugin.addDirectoryItem(handle=ADDON_HANDLE, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(ADDON_HANDLE)",
"def getCategory():",
"def create_category_pages(app):\n env = app.builder.env\n # jinja2 html template\n template = CATEGORY_PAGE_TEMPLATE\n\n categories = env.categories\n for name, category in categories.iteritems():\n context = {}\n # First write out the named page\n context[\"title\"] = category.name\n\n #get parent category\n if \"\\\\\" in category.name:\n categs = category.name.split(\"\\\\\")\n categs.pop()\n parent_category = r\"\\\\\".join(categs)\n parent_category_link = \"../\" + categs[-1] + \".html\"\n parent_category = \"<b>Category:</b> <a href='{0}'>{1}</a>\"\\\n .format(parent_category_link,parent_category)\n context[\"parentcategory\"] = parent_category\n\n # sort subcategories & pages alphabetically\n context[\"subcategories\"] = sorted(category.subcategories, key = lambda x: x.name)\n context[\"pages\"] = sorted(category.pages, key = lambda x: x.name)\n context[\"outpath\"] = category.html_path\n\n #jinja appends .html to output name\n category_html_path_noext = os.path.splitext(category.html_path)[0]\n yield (category_html_path_noext, context, template)\n\n # Now any additional index pages if required\n if category.name in INDEX_CATEGORIES:\n # index in categories directory\n category_html_dir = os.path.dirname(category.html_path)\n category_html_path_noext = category_html_dir + \"/index\"\n yield (category_html_path_noext, context, template)\n\n # index in document directory\n document_dir = os.path.dirname(category_html_dir)\n category_html_path_noext = document_dir + \"/index\"\n context[\"outpath\"] = category_html_path_noext + \".html\"\n yield (category_html_path_noext, context, template)",
"def categories(self):\n pass",
"def search_and_store_graph (self, category, subcategory_depth, parent_node, include_pages):\n\n title = category if category.startswith('Category:') else 'Category:' + category\n\n #errore, importare ewlinks table\n category_url = ('https://en.wikipedia.org/wiki/' + category.replace(\" \", \"_\"))\n\n # indent based on the depth of the category: visualisation problems may occur if max_depth is not >>\n # subcategory_depth * 2\n print(\" \" * ((MAX_DEPHT) - (subcategory_depth * 2)) + category + \" URL: \" + category_url)\n\n # adding the category to the graph\n category_node = category_url\n\n self.category_graph.add_node(title, type='cat')\n if parent_node != 'null':\n self.category_graph.add_edge(parent_node, title)\n\n new_parent_node = title\n\n # =========Adding the pages to the categories, if required (generates a very large graph)====\n\n if include_pages:\n\n query = 'SELECT cl_from FROM categorylinks WHERE cl_type =\"page\" AND cl_to=\"' + (category[9:][0:]).replace(\n \" \", \"_\") + '\\\"'\n page_results = self.connection_db.query_request(query)\n for page_result in page_results:\n query = \"SELECT page_title FROM page WHERE page_id=\" + str(page_result[0])\n title_result = self.connection_db.query_request(query)\n try:\n page_title = str(title_result[0][0], 'utf-8')\n page_url = 'https://en.wikipedia.org/wiki/' + page_title\n page_node = \"Page:\" + page_title\n print(\" \" * (MAX_DEPHT - (\n (subcategory_depth - 1) * 2)) + \"Page title: \" + page_title + \" URL: \" + page_url)\n self.category_graph.add_node(page_node, type='pag')\n\n self.category_graph.add_edge(new_parent_node, page_node)\n except IndexError:\n print(\" \" * (MAX_DEPHT - ((subcategory_depth - 1) * 2)) + \"Document whit page id:\" + (\n str(title_result)[1:-2]) + \" Not found!\")\n\n # =======Adding and exploring the subcategories===\n if subcategory_depth > 0:\n\n search_title = (category[9:]).replace(\" \", \"_\")\n query = 'SELECT cl_from FROM categorylinks WHERE cl_type =\"subcat\" AND cl_to=\"' + search_title + '\\\"'\n subcat_results = self.connection_db.query_request(query)\n for subcat_result in subcat_results:\n query = \"SELECT page_title FROM page WHERE page_id=\" + str(subcat_result[0])\n result = self.connection_db.query_request(query)\n try:\n result = 'Category:' + str(result[0][0], 'utf-8')\n self.search_and_store_graph(result, subcategory_depth - 1, new_parent_node, include_pages)\n except IndexError:\n print(\" \" * (MAX_DEPHT - ((subcategory_depth - 1) * 2)) + \"Document whit page id:\" + (\n str(subcat_result)[1:-2]) + \" Not found!\")",
"def _create_links_and_track(self, page_name, category_list):\n env = self.state.document.settings.env\n if not hasattr(env, \"categories\"):\n env.categories = {}\n\n link_rst = \"\"\n ncategs = 0\n for categ_name in category_list:\n #categ_name is the full category name - register that\n category = self.register_category(categ_name, env)\n category.pages.add(PageRef(page_name, env.docname))\n\n #now step up a step up each time the category hierarchy\n parent_category = categ_name\n while True:\n if r\"\\\\\" in parent_category:\n categs = parent_category.split(r\"\\\\\")\n else:\n break\n # remove the last item\n subcat = Category(categ_name, env.docname) #create the category with the full name\n subcat.name=categs.pop() # and then replace it with the last token of the name\n parent_category = r\"\\\\\".join(categs)\n\n #register the parent category\n parent = self.register_category(parent_category, env)\n parent.subcategories.add(subcat)\n\n # endwhile\n\n #category should be the last subcategory by this point\n link_rst += \"`%s <%s>`_ | \" % (categ_name, category.link(env.docname))\n ncategs += 1\n # endfor\n\n link_rst = \"**%s**: \" + link_rst.rstrip(\" | \") # remove final separator\n if ncategs == 1:\n link_rst = link_rst % \"Category\"\n else:\n link_rst = link_rst % \"Categories\"\n #endif\n\n return link_rst",
"def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)",
"def __init__(self, name, docname):\n\n if \"\\\\\" in docname:\n docname = docname.replace(\"\\\\\", \"/\")\n dirpath, filename = os.path.split(docname)\n html_dir = dirpath + \"/\" + CATEGORIES_DIR\n self.html_path = html_dir + \"/\" + name.replace(\"\\\\\\\\\", \"/\") + \".html\"\n super(Category, self).__init__(name, self.html_path)\n self.pages = set([])\n self.subcategories = set([])",
"def on_category(self):\n super(ProjectSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()",
"def browse_categories(request):\n\n result = {}\n\n u = request.user\n\n top = Category.objects.get(name=\"Best Buy\")\n result['categories'] = [c.get_json() for c in top.children.all()]\n \n return JSONHttpResponse(result)",
"def test_get_categories(self):\n pass",
"def all_categories(request, slug=None):\n c = {\"categories\": Node.objects.filter(kind=\"C\")}\n return render_to_response(\"categories.html\", c)",
"def _create_links_and_track(self, page_name, category_list):\n env = self.state.document.settings.env\n if not hasattr(env, \"categories\"):\n env.categories = {}\n\n link_rst = \"\"\n ncategs = 0\n for item in category_list:\n has_subcat = False\n if r\"\\\\\" in item: \n categs = item.split(r\"\\\\\")\n has_subcat = True\n else:\n categs = [item]\n # endif\n\n print \n for index, categ_name in enumerate(categs):\n if categ_name not in env.categories:\n category = Category(categ_name)\n env.categories[categ_name] = category\n else:\n category = env.categories[categ_name]\n #endif\n category.pages.append(PageRef(page_name))\n if has_subcat and index > 0:\n category.subcategories.append(PageRef(categ_name))\n #endif\n link_rst += \":ref:`%s` | \" % categ_name\n ncategs += 1\n # endfor\n # endfor\n\n link_rst = \"`%s: <categories.html>`_ \" + link_rst\n if ncategs == 1:\n link_rst = link_rst % \"Category\"\n else:\n link_rst = link_rst % \"Categories\"\n #endif\n\n return link_rst",
"def html_collect_pages(app):\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)",
"def html_collect_pages(app):\n\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)",
"def test_get_children_category(self):\n children = self.category.get_children()\n self.assertEqual(children[0], self.project)",
"def create_hierarchy(self):\n\t\tpass",
"def easyCategory():\n return prepJSON(cs411_dash.easyCategory())",
"def test_set_category_parent(self):\n pass",
"def findCats(self):\n SQLquery = 'SELECT DISTINCT upperlevel FROM cathierarchy'\n return self._findCboxItems(SQLquery)",
"def browse_category(request):\n\n result = {'categories':[], 'products':[]}\n\n u = request.user\n\n page = request.POST.get('page', 1)\n\n cat = Category.objects.get(id=request.POST['cat_id'])\n if cat.children.count() > 0:\n result['categories'] = [c.get_json() for c in cat.children.all()]\n else:\n # display items\n result = Product.objects.filter_category(cat.category_id, page, u) \n\n return JSONHttpResponse(result)",
"def on_category(self):\n super(ToolSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()",
"def show_categories():\n for category in NEWS_CATEGORIES:\n print(category)",
"def get_selected_categories_and_codes(self):\n\n self.codes, self.categories = self.app.get_codes_categories()\n # Extra keys for hierarchy charts\n for code in self.codes:\n code['count'] = 0\n code['parentname'] = \"\"\n for cat in self.categories:\n cat['count'] = 0\n cat['parentname'] = \"\"\n\n node = self.ui.comboBox_category.currentText()\n if node == \"\":\n return\n for category in self.categories:\n if category['name'] == node:\n node = category\n node['supercatid'] = None\n break\n \"\"\" Create a list of this category (node) and all its category children.\n Note, maximum depth of 100. \"\"\"\n selected_categories = [node]\n i = 0 # Ensure an exit from loop\n new_model_changed = True\n while self.categories != [] and new_model_changed and i < 100:\n new_model_changed = False\n append_list = []\n for n in selected_categories:\n for m in self.categories:\n if m['supercatid'] == n['catid']:\n append_list.append(m)\n for n in append_list:\n selected_categories.append(n)\n self.categories.remove(n)\n new_model_changed = True\n i += 1\n self.categories = selected_categories\n # Remove codes that are not associated with these categories\n selected_codes = []\n for cat in self.categories:\n for code in self.codes:\n if code['catid'] == cat['catid']:\n selected_codes.append(code)\n self.codes = selected_codes",
"def _get_categories(self, *args):\n raise NotImplementedError(self, \"_get_categories\")",
"def _get_packages(self, category):\n raise NotImplementedError(self, \"_get_packages\")",
"def get_structure():\n\n _articles = []\n _categories = []\n\n def get_article(article_filename, general_category, sep='|||'):\n \"\"\"\n Adds the given article to the \"articles, categories, general\" current structure\n \"\"\"\n category = []\n with open(article_filename) as _f:\n for _row in _f:\n if len(_row):\n if _row[0] == '=':\n # new category\n k = 0\n while _row[k] == '=':\n k += 1\n if k > 1:\n category = category[:k - 1]\n category += [clean_text(_row)]\n sub_category = []\n elif _row[0] == '#':\n # new entry\n _articles.append(clean_text(_row))\n k = 0\n while _row[k] == '#':\n k += 1\n sub_category = sub_category[:k - 1] + [clean_text(_row)]\n if category[0] == general_category:\n _categories.append(sep.join(category + sub_category[:-1]))\n else:\n _categories.append(sep.join([general_category] + category + sub_category[:-1]))\n\n categories_dict = get_categories('https://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Level/5')\n _general = {k: v.split('/')[5] for k, v in categories_dict.items()}\n filenames = list(categories_dict.keys())\n\n if not os.path.exists('wikivitals/data/mds/'):\n os.makedirs('wikivitals/data/mds/')\n\n for k, v in categories_dict.items(): # saves the category pages' text\n with open('wikivitals/data/mds/{}'.format(k), 'w', encoding='utf8') as f:\n url = \"https://en.wikipedia.org/w/index.php?title={}&action=edit\".format(v[6:])\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n f.write(soup.find('textarea').text)\n\n for filename in filenames:\n get_article('wikivitals/data/mds/' + filename, _general[filename])\n\n with open('wikivitals/data/en-categories.txt', 'w', encoding='utf8') as file:\n for cat in _categories:\n file.write(cat + \"\\n\")\n\n with open('wikivitals/data/en-articles.txt', 'w', encoding='utf8') as file:\n for name in _articles:\n file.write(name + \"\\n\")\n\n return _articles, _categories, _general"
] | [
"0.6612943",
"0.6476937",
"0.6362572",
"0.6236043",
"0.6191051",
"0.61242706",
"0.6083505",
"0.5918996",
"0.5907317",
"0.5860402",
"0.58141035",
"0.5798131",
"0.57252145",
"0.5720753",
"0.5695223",
"0.56784064",
"0.56283355",
"0.56238097",
"0.55597705",
"0.5531781",
"0.5503248",
"0.5491853",
"0.54887885",
"0.5462143",
"0.54418164",
"0.5439464",
"0.53357846",
"0.5333601",
"0.53195894",
"0.5313038"
] | 0.7459369 | 0 |
Update organization pages from apografi API. | def update_pages(self, fetch_from_api=False, details=None,
force_create=False):
logger.debug('Updating organization pages...')
def template_text(org_details):
te = TemplateEditor(self.TEMPLATE)
template = te.templates[self.TEMPLATE_NAME][0]
# Add details to template parameters
for key in self.TEMPLATE_FIELD_NAME_SUFFIXES:
if '_' in key:
details_keys = key.split('_')
else:
details_keys = None
if details_keys is None:
value = org_details.get(key, None)
else:
value = org_details.get(details_keys[0], {}).get(
details_keys[1], None)
if value is not None:
if isinstance(value, list):
value = ','.join(value)
value = escape(str(value))
# Clean up telephone value
if key == self.TEMPLATE_CONTACT_POINT_TELEPHONE:
value = value.replace(' ', '').replace('+30', '')
new_value = ''
for c in value:
if not c.isdigit():
break
new_value += c
value = new_value
template.parameters[
f'{self.TEMPLATE_PARAM_PREFIX}{key}'] = value
return str(template).replace(' |', '|')
if details is None:
details = self._details(fetch_from_api=fetch_from_api)
for org, org_details in details.items():
page = self._get_site_page(f'{self.NAMESPACE}:{org}')
page_condition = page is not None
if not force_create:
page_condition = page_condition and page.exists
if page_condition:
page_text = page.text()
page_text_leftovers = re.sub(
rf'{{{{{self.TEMPLATE_NAME}[^{{}}]+}}}}', '',
page_text).strip()
new_template_text = template_text(org_details)
new_page_text = f'{new_template_text}\n{page_text_leftovers}'
page.edit(new_page_text)
logger.debug(f'{page.name} updated')
logger.debug('Done.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, organisation: Organisation) -> None:\n ...",
"def UpdateOrganizationSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def update(self):\n if g.my['rank'] < 15:\n self.__request_first_block()\n \n self.__request_second_block()\n self.__request_label_and_alias()\n self.__request_values()\n \n if self.message is None:\n try:\n g.db.pages.update({ '_id' : ObjectId(self.page['_id']) }, self.page)\n self.status = 'msg msg-success'\n self.message = g.pages_msg('success_update_page')\n except PyMongoError:\n self.message = g.pages_msg('error_mongo_update')\n \n return False",
"def update(_id): \n pages_object = Pages(_id)\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Update page\n if request.method == 'POST':\n if pages_object.update():\n return redirect(url_for('pages.overview'))\n \n len_of_label = len(page['label'])\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/update.html'.format(MODULE_DIR), **locals())",
"async def update_organization(request: Request, org: str, data: dict):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n organizations_obj[org] = data[\"organization_data\"]\n await redis.set_key(\"influxdb_organizations\", orjson.dumps(organizations_obj))\n logger.info(\"Organization %s updated\", org)\n return {\"message\": \"Organization {} updated\".format(org)}",
"def update(self, request, pk=None):\n serializer = OrganizationUpdateBody(data=request.data)\n if serializer.is_valid(raise_exception=True):\n name = serializer.validated_data.get(\"name\")\n # agents = serializer.validated_data.get(\"agents\")\n # network = serializer.validated_data.get(\"network\")\n # channel = serializer.validated_data.get(\"channel\")\n try:\n Organization.objects.get(name=name)\n except ObjectDoesNotExist:\n pass\n # organization = Organization.objects.filter(name=name).update(agents=agents, network=network.id, channel=channel.id)\n\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def put(self, request, *args, **kwargs):\n return super(PageElementDetail, self).put(request, *args, **kwargs)",
"def test_update_org(session): # pylint:disable=unused-argument\n org = factory_org_service()\n\n org.update_org(TestOrgInfo.org2)\n\n dictionary = org.as_dict()\n assert dictionary['name'] == TestOrgInfo.org2['name']",
"def put(self, organization_id):\n if organization_id is not None:\n try:\n org = Organization.query.filter_by(id=organization_id).first()\n # return a 404 if org does not exist\n abort(404) if org is None else org\n\n if request.headers['Content-Type'] == \"application/json\":\n payload = request.data\n elif request.form:\n payload = request.data.to_dict()\n else:\n payload = request.get_json(force=True)\n\n for key in payload.keys():\n setattr(org, key, payload.get(key))\n org.save()\n response = org.serialize()\n\n return make_response(jsonify(response)), 200\n\n except Exception as e:\n response = {\n \"message\": str(e)\n }\n print(str(e))\n return make_response(jsonify(response)), 400",
"def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")",
"def post(self, request, page_pk=None):\n if page_pk is not None:\n try:\n page = request.website.pages.select_related()\\\n .get(pk=page_pk)\n app_page = page.app_page_object\n except Page.DoesNotExist:\n raise ErrorResponse(status.HTTP_400_BAD_REQUEST,\n {'msg': MESSAGES.get('default_error', \"\")})\n else:\n app_page = request.page.app_page_object\n page = request.page\n\n # Page App Admin Form\n PageAppForm = app_page.get_admin_form()\n form = PageAppForm(request.POST, instance=app_page)\n \n if form.is_valid():\n new_app_page = form.save()\n # If page is the current page,\n # refresh the layout section\n if request.page == page:\n # Get layout slug\n placeholder_slug_items = check_placeholder_html_id(\n page.placeholder_slug)\n layout_section_slug = placeholder_slug_items[0]\n # Rendering layout section\n rendering_context = RenderingContext(request)\n html_rendering = rendering_context.get_html_layout(\n layout_section_slug)\n # Send response\n data_context = {'msg': MESSAGES.get('app_edit_success', \"\"),\n 'html': html_rendering,\n 'layout_section_slug': layout_section_slug}\n # Check if the page manager have to be displayed\n if page_pk:\n data_context['refresh_pages_list'] = True\n \n response = Response(status.HTTP_200_OK,\n data_context)\n else:\n data_context = {'msg': MESSAGES.get('app_edit_success', \"\")}\n # Check if the page manager have to be displayed\n if page_pk:\n data_context['refresh_pages_list'] = True\n response = Response(status.HTTP_200_OK,\n data_context)\n return self.render(response)\n # render_page = page.render_page(request)\n\n # if render_page.status_code == 200:\n # response = Response(status.HTTP_200_OK,\n # {\"msg\": MESSAGES.get('app_edit_success', \"\"),\n # 'html': render_page.content,\n # 'medias': render_page.medias})\n # elif render_page.status_code in [301, 302]:\n # response = Response(status.HTTP_202_ACCEPTED,\n # {\"msg\": MESSAGES.get('redirection', \"\"),\n # 'location': render_page['location']})\n\n # If form not valid => reload the edit form with messages\n else:\n data_context = {'form': form,\n 'object': app_page}\n if page_pk:\n data_context['page'] = page\n\n html = render_to_string('administration/app/app-edit.html',\n data_context,\n context_instance=RequestContext(request))\n raise ErrorResponse(status.HTTP_400_BAD_REQUEST,\n {'msg': MESSAGES.get('invalid_data', \"\"),\n 'html': html})",
"def edit_page(self, document_data: dict):\n wiki_obj = WikiService()\n token = wiki_obj.get_token()\n wiki_obj.check_token(token)\n\n project_wikitext_data = self.generate_page_sections_dict(\n document_data\n )\n\n updated_text = wiki_obj.generate_page_text_from_dict(\n self.project_page_template,\n f\"=={self.page_initial_section}==\",\n project_wikitext_data,\n self.users_list_section\n )\n\n project_page_name = f\"{document_data['project']['name']}\"\n\n wiki_obj.edit_page(\n token,\n project_page_name,\n updated_text\n )",
"def put(self, request, organization):\n serializer = OrganizationSerializer(organization, data=request.DATA,\n partial=True)\n if serializer.is_valid():\n organization = serializer.save()\n\n self.create_audit_entry(\n request=request,\n organization=organization,\n target_object=organization.id,\n event=AuditLogEntryEvent.ORG_EDIT,\n data=organization.get_audit_log_data(),\n )\n\n return Response(serialize(organization, request.user))\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def edit(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"{}s/{}/pages/{}\".format(self.parent_type, self.parent_id, self.url),\n _kwargs=combine_kwargs(**kwargs),\n )\n\n page_json = response.json()\n page_json.update({\"course_id\": self.course_id})\n super(Page, self).set_attributes(page_json)\n\n return self",
"def process_organizations(self, organizations):\n self.process_elements(\n organizations,\n self.organization_table,\n self.extract_organization,\n ['organization_data', 'member', 'organization']\n )",
"def export_organizations(self):\n print('\\n=== Exporting all organization data...')\n\n for organization in self.client.organizations:\n print('- Exporting organizations:', organization.name)\n\n json = {\n 'id': self.get_id(organization),\n 'href': organization.href,\n 'name': organization.name,\n 'nameKey': organization.name_key,\n 'description': organization.description,\n 'status': organization.status,\n 'createdAt': organization.created_at.isoformat(),\n 'modifiedAt': organization.modified_at.isoformat(),\n 'customData': self.get_custom_data(organization),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n }\n\n default_account_store_mapping = organization.default_account_store_mapping\n default_group_store_mapping = organization.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': organization.default_account_store_mapping.href.split('/')[-1],\n 'href': organization.default_account_store_mapping.href,\n 'type': organization.default_account_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_account_store_mapping.account_store.name,\n 'list_index': organization.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': organization.default_group_store_mapping.href.split('/')[-1],\n 'href': organization.default_group_store_mapping.href,\n 'type': organization.default_group_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_group_store_mapping.account_store.name,\n 'list_index': organization.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in organization.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(organization.tenant)\n self.write('%s/%s/organizations/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')",
"def test_put_list_replace(self):\n for org in Organization.objects.all():\n OrganizationMembership.objects.create(\n user=self.user, organization=org)\n self.user.save()\n self.story.organizations.add(*list(Organization.objects.filter(organizationtranslation__name__in=(\"Urban Land Conservancy\", \"America Scores Denver\"))))\n self.story.save()\n self.assertEqual(self.story.organizations.count(), 2)\n put_data = [organization.organization_id for organization in\n Organization.objects.filter(organizationtranslation__name__in=(\"Mile High Connects\", \"Piton Foundation\"))]\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/%s/organizations/' % (self.story.story_id)\n response = self.api_client.put(uri, format='json', data=put_data)\n self.assertHttpAccepted(response)\n self.story = Story.objects.get(story_id=self.story.story_id)\n self.assertEqual(self.story.organizations.count(), 2)\n ids = [organization.organization_id for organization in self.story.organizations.all()]\n self.assertEqual(ids, put_data)",
"def test_retrieve_l_organization(self):\n pass",
"def organization_follow_doc_template_values(url_root):\n required_query_parameter_list = [\n {\n 'name': 'voter_device_id',\n 'value': 'string', # boolean, integer, long, string\n 'description': 'An 88 character unique identifier linked to a voter record on the server',\n },\n {\n 'name': 'organization_id',\n 'value': 'integer', # boolean, integer, long, string\n 'description': 'Internal database unique identifier for organization',\n },\n {\n 'name': 'organization_we_vote_id',\n 'value': 'string', # boolean, integer, long, string\n 'description': 'The unique identifier for this organization across all networks '\n '(either organization_id OR organization_we_vote_id required -- not both.) '\n 'NOTE: In the future we '\n 'might support other identifiers used in the industry.',\n },\n {\n 'name': 'api_key',\n 'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string\n 'description': 'The unique key provided to any organization using the WeVoteServer APIs',\n },\n ]\n optional_query_parameter_list = [\n ]\n\n potential_status_codes_list = [\n {\n 'code': 'VALID_VOTER_DEVICE_ID_MISSING',\n 'description': 'A valid voter_device_id parameter was not included. Cannot proceed.',\n },\n {\n 'code': 'VALID_VOTER_ID_MISSING',\n 'description': 'A valid voter_id was not found from voter_device_id. Cannot proceed.',\n },\n {\n 'code': 'VALID_ORGANIZATION_ID_MISSING',\n 'description': 'A valid organization_id was not found. Cannot proceed.',\n },\n {\n 'code': 'ORGANIZATION_NOT_FOUND_ON_CREATE FOLLOWING',\n 'description': 'An organization with that organization_id was not found. Cannot proceed.',\n },\n {\n 'code': 'FOLLOWING',\n 'description': 'Successfully following this organization',\n },\n ]\n\n try_now_link_variables_dict = {\n 'organization_id': '1',\n }\n\n api_response = '{\\n' \\\n ' \"status\": string,\\n' \\\n ' \"success\": boolean,\\n' \\\n ' \"voter_device_id\": string (88 characters long),\\n' \\\n ' \"organization_id\": integer,\\n' \\\n ' \"organization_we_vote_id\": string,\\n' \\\n '}'\n\n template_values = {\n 'api_name': 'organizationFollow',\n 'api_slug': 'organizationFollow',\n 'api_introduction':\n \"Call this to save that the voter is following this organization.\",\n 'try_now_link': 'apis_v1:organizationFollowView',\n 'try_now_link_variables_dict': try_now_link_variables_dict,\n 'url_root': url_root,\n 'get_or_post': 'GET',\n 'required_query_parameter_list': required_query_parameter_list,\n 'optional_query_parameter_list': optional_query_parameter_list,\n 'api_response': api_response,\n 'api_response_notes':\n \"\",\n 'potential_status_codes_list': potential_status_codes_list,\n }\n return template_values",
"def fetch_details_from_api(self, org_names=None):\n logger.debug('Fetching org details from API...')\n details = {}\n if org_names is None:\n org_names = self._all_page_names(without_namespace=True)\n for org in org_names:\n code = self._code_by_name(org)\n if code is None:\n continue\n data = self._data_by_code(code)\n if data is None:\n continue\n details[org] = data\n # Replace parent code with parent name (preferredLabel)\n parent_code = details[org].get('subOrganizationOf')\n if parent_code:\n parent_name = self._name_by_code(parent_code)\n if parent_name is None:\n parent_name = ''\n details[org]['subOrganizationOf'] = parent_name\n purpose_ids = details[org].get('purpose')\n # Replace purpose ids with purpose (function) names\n if purpose_ids:\n details[org]['purpose'] = ','.join([\n self._purpose_by_id[id_] for id_ in purpose_ids])\n # Replace status with greek translation\n status = details[org].get('status')\n if status:\n details[org]['status'] = self.STATUS_TRANSLATION[status]\n # Replace type id with type name\n type_id = details[org].get('organizationType')\n if type_id:\n details[org]['organizationType'] = self._type_by_id[type_id]\n logger.debug(f'{org} - fetched details')\n logger.debug('Fetched org details.')\n return details",
"def test_org_structure_sync_0365(self):\n self.div1.sync_o365 = False\n self.div1.save()\n url = '/api/users/?org_structure=true&sync_o365=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Division 1 won't be present in the response.\n self.assertNotContains(response, self.div1.name)",
"def updateOrgAdmins(request):\n\n return updateRole('gsoc_org_admin')",
"def update_page_extension(instance, language):\n for method in [\n update_course,\n update_course_run,\n update_category,\n update_organization,\n ]:\n try:\n # The method should raise an ObjectDoesNotExist exception if the page extension\n # linked to this instance is of another type.\n method(instance, language)\n except ObjectDoesNotExist:\n continue\n else:\n return",
"def test_get_organization(self):\n pass",
"def update_organizational_id(cls, aws_cloud_account_id: str, body: CloudAccountUpdateOrganizationalUnitId) -> Dict:\n\t\tpass",
"def test_retrieve_l_organizations(self):\n pass",
"def fusion_api_edit_directory(self, body, uri, api=None, headers=None):\n return self.logindomain.update(body, uri, api, headers)",
"def get_organizations_list_with_links(year_link):\n response = get_response(year_link)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n orgs_li = soup.find_all(\n 'li', attrs={'class': 'organization-card__container'})\n orgs_dict = {}\n for orgs_html in orgs_li:\n org_name = orgs_html.select('h4')[0].text.replace('\\n', '')\n relative_link = orgs_html.select('a')[0].get('href')\n full_link = HOME_PAGE + relative_link\n orgs_dict[org_name] = full_link\n return orgs_dict\n else:\n print('Something Went Wrong')\n print(f'Status Code: {response.status_code}')\n sys.exit(1)",
"def organization_put_not_found(self, client, jwt_token):\n assert client.put('/organizations/' + '0', headers={\n 'Authorization': 'Bearer ' + jwt_token},\n data={'firstname': 'Daisy',\n 'lastname': 'Ducks',\n 'email': '[email protected]'}).status == \\\n '404 NOT FOUND'",
"def fusion_api_edit_uplink_set(self, body, uri, api=None, headers=None):\n return self.uplink_set.update(body, uri, api, headers)"
] | [
"0.576033",
"0.5739277",
"0.5607183",
"0.55946696",
"0.5534071",
"0.54312897",
"0.53235763",
"0.53037417",
"0.5271056",
"0.5237154",
"0.51738906",
"0.5145725",
"0.51090777",
"0.51086265",
"0.5090848",
"0.50289696",
"0.49717492",
"0.49616772",
"0.49475837",
"0.4938556",
"0.49300328",
"0.49178177",
"0.4909222",
"0.4905913",
"0.4897734",
"0.4894979",
"0.4870554",
"0.4867549",
"0.4851472",
"0.48452577"
] | 0.6676893 | 0 |