query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Returns all the current processes running
|
def get_all_current_processes():
    p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    return out
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active",
"def get_running_processes(self):\n\n all_processes = []\n for _process in self.processes:\n all_processes.append(_process[\"pid\"])\n return all_processes",
"def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]",
"def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)",
"def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]",
"def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs",
"def get_processes():\n yield from psutil.process_iter()",
"def procs_running():\n \n return __proc_stat('procs_running')",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}",
"def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs",
"def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs",
"def monitoredProcs(self):\n return self._pidToProcess.itervalues()",
"def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))",
"def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()",
"def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]",
"def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes",
"def GetPublishedProcesses():\r\n pass",
"def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs",
"def processes(self):\n return self._getint('processes')",
"def getAllProcessInfo(self):\r\n self._update('getAllProcessInfo')\r\n\r\n all_processes = self._getAllProcesses(lexical=True)\r\n\r\n output = []\r\n for group, process in all_processes:\r\n name = make_namespec(group.config.name, process.config.name)\r\n output.append(self.getProcessInfo(name))\r\n return output",
"def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst",
"def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))",
"def get_running_processes(self, dev_handler):\n # Get the list of running processes on each device\n running_processes = NvmlHandler.exec_nvml_function(nvmlDeviceGetComputeRunningProcesses,dev_handler)\n\n # Turns these process objects into dicts\n running_processes_dicts = [obj.__dict__ for obj in running_processes if obj]\n\n # Enhance these dicts with information from psutil\n new_dicts = []\n for running_processes_dict in running_processes_dicts:\n\n # Init the new dict with the current information\n more_ps_infos = {}\n more_ps_infos.update(running_processes_dict)\n\n # Rename the usedGpuMemory key, if any\n if 'usedGpuMemory' in more_ps_infos:\n more_ps_infos['gpu_memory_used'] = utils.psutil_parse_readable_bytes(\n more_ps_infos.get('usedGpuMemory')\n )\n del more_ps_infos['usedGpuMemory']\n\n # Try to retreive info about the process using psutil\n try:\n pid = running_processes_dict.get('pid')\n more_ps_infos.update(utils.psutil_snapshot_process(pid))\n except Exception as e:\n logger.warning('Cannot gather info from process {}'.format(pid))\n\n new_dicts.append(more_ps_infos)\n\n return new_dicts",
"def get_running_pris(self):\n try:\n running_pris_list = []\n output = self.ssh.exec_command(self.check_running_kombu_dialer_command)\n for line in output[1].readlines():\n line = line.split()\n if self.server in line and \"-g\" in line:\n running_pris_list.append(\n int(\n line[line.index(\"-g\")+1][2:]\n )\n )\n return running_pris_list\n except Exception as err:\n self.error_logger.error(err.message + \" PRITester::get_running_pris\")\n return None",
"def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})",
"def processes(self):\n nodes = (self.nodes.exclude(process__isnull=True)\n .values_list('process_id', flat=True))\n return Process.objects.filter(id__in=nodes).distinct()",
"def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)"
] |
[
"0.8456338",
"0.8348467",
"0.81814694",
"0.780856",
"0.75527126",
"0.73214626",
"0.7301836",
"0.72589177",
"0.71031713",
"0.70973146",
"0.7081394",
"0.70718044",
"0.7025768",
"0.7025768",
"0.70113343",
"0.6913096",
"0.68842685",
"0.684823",
"0.6841502",
"0.6802587",
"0.67958695",
"0.677776",
"0.67644477",
"0.67483413",
"0.6744697",
"0.67446256",
"0.6737268",
"0.664622",
"0.6638923",
"0.6619755"
] |
0.87931204
|
0
|
Given a module name from a URL, obtain the handler function from it and return the function object.
|
def gethandlerfunc(modname):
    try:
        # Import the module
        mod = __import__(modname)
    except ImportError:
        # No module with this name
        raise404("Couldn't import module " + modname)
    try:
        # Find the handler function
        handler = mod.handler
    except AttributeError:
        # No handler function
        raise404("Couldn't find handler function in module " + modname)
    if not callable(handler):
        # It's not a function
        raise404("Handler is not callable in module " + modname)
    return handler
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def find_handler(url):\n for handler in __all__:\n # Get the symbol for handler\n mod = globals()[handler]\n # Ask handler if it can handle the url\n if getattr(mod, \"can_handle\")(url):\n return mod\n return None",
"def gethandlername(URL):\n match = re.search(\"/([a-zA-Z0-9_-]+)\\.prog($|/|\\?)\", URL)\n if not match:\n # Couldn't find the requested module\n raise404(\"Couldn't find a module name in URL \" + URL)\n return match.group(1)",
"def load_function(engine_path):\r\n module_path, _, name = engine_path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def import_function(name: str):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)",
"def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)",
"def get_function(function_path):\n try:\n mod_name, func_name = function_path.rsplit('.', 1)\n mod = import_module(mod_name)\n except ImportError as e:\n raise ImproperlyConfigured(('Error importing module %s: \"%s\"' %\n (mod_name, e)))\n return getattr(mod, func_name)",
"def load_function(engine_path):\r\n module_path, _, name = engine_path.rpartition('.')\r\n return getattr(importlib.import_module(module_path), name)",
"def get_function_by_name(name):\n function_name = name + 'ed'\n return globals()[function_name]",
"def get_func(func_name):\n from importlib import import_module\n if func_name is None:\n return None\n parts = func_name.split('.')\n module_name = '.'.join(parts[:-1])\n module = import_module(module_name)\n return getattr(module, parts[-1])",
"def __extract_module_from_url(url):\n modules_dict = {\n \"ADVERTISING\": [\"ADVERTISING\", \"ADV\", \"advertising\"],\n \"ADVOCACY\": [\"ADVOCACY\", \"advocacy\"],\n \"AUTOMATION\": [\"AUT\", \"automation\"],\n \"BENCHMARKING\": [\"BENCHMARKING\", \"BMK\", \"benchmarking\"],\n \"CARE\": [\"CARE\", \"care\"],\n \"CASE_MANAGEMENT\": [\"UNIVERSAL_CASE\", \"universal_case\"],\n \"COMMENT\": [\"COMMENT\", \"comment\"],\n \"ENGAGEMENT\": [\"ENGAGEMENT\", \"ENG\", \"engagement\"],\n \"GOVERNANCE\": [\"GOVERNANCE\", \"GOV\", \"governance\"],\n \"INBOUND_MESSAGE\": [\"INBOUND_MESSAGE\", \"inbound_message\"],\n \"LISTENING\": [\"LISTENING\", \"LST\", \"listening\"],\n \"MARKETING\": [\"MARKETING\", \"MKT\", \"marketing\"],\n \"METADATA\": [\"METADATA\", \"metadata\"],\n \"META_CONTENT\": [\"META_CONTENT\", \"meta_content\"],\n \"OUTBOUND\": [\"OUTBOUND\", \"outbound\"],\n \"OUTBOUND-STREAM-FEED\": [\"OUTBOUND-STREAM-FEED\", \"outbound-stream-feed\"],\n \"OUTBOUND_MESSAGE\": [\"OUTBOUND_MESSAGE\", \"outbound_message\"],\n \"PAID\": [\"PAID\", \"paid\"],\n \"PLATFORM\": [\"PLATFORM\", \"platform\"],\n \"PUBLISHING\": [\"PUBLISHING\", \"PUB\", \"publishing\"],\n \"RDB_FIREHOSE\": [\"RDB_FIREHOSE\", \"rdb_firehose\"],\n \"REPORTING\": [\"REPORTING\", \"reporting\"],\n \"RESEARCH\": [\"RESEARCH\", \"research\"],\n \"SAM\": [\"SAM\", \"/sam/\"],\n \"SOCIAL\": [\"SOCIAL\", \"social\"],\n \"spellcheck-grammar\": [\"spellcheck\", \"grammar\"],\n \"SPR_TASK\": [\"SPR_TASK\", \"spr_task\"],\n \"SUGGESTION\": [\"SUGGESTION\", \"suggestion\"],\n \"UGC\": [\"UGC\", \"ugc\"],\n }\n matching_module = [\n mod\n for mod in modules_dict\n if any(keyword in url for keyword in modules_dict[mod])\n ]\n if (len(matching_module)) == 1:\n return matching_module[0]\n\n else:\n return \"UNKNOWN\"",
"def GetHandlerForHttpRequest(request):\n\n matcher = http_routing.HTTP_ROUTING_MAP.bind(\n \"%s:%s\" % (request.environ[\"SERVER_NAME\"],\n request.environ[\"SERVER_PORT\"]))\n try:\n match = matcher.match(request.path, request.method)\n except werkzeug_exceptions.NotFound:\n raise api_call_handlers.ApiCallHandlerNotFoundError(\n \"No API handler was found for (%s) %s\" % (request.path,\n request.method))\n\n handler_cls, route_args = match\n return (handler_cls(), route_args)",
"def import_function(\n name: Optional[str]\n) -> Optional[Callable]:\n\n if name is None:\n return None\n\n module_name, function_name = name.rsplit('.', maxsplit=1)\n function_module = import_module(module_name)\n function = getattr(function_module, function_name)\n\n return function",
"def lookup_function(self, name, context):\n if name not in self.functions:\n raise Exception(\"No such function: \" + str(name))\n return Function(self.functions[name], context)",
"def _get_function(self, uri, *args, **kwargs):\n post_call = not bool(uri)\n uri = uri or \".\".join(self._traverse_access_uri())\n mod, func = uri.rsplit(\".\", 1)\n if mod not in self.map():\n raise sugar.lib.exceptions.SugarLoaderException(\"Task {} not found\".format(uri))\n cls = self.map()[mod]\n if cls is None:\n ifce, cls = self._get_impl_class(mod)\n cls.modules = self\n self.map()[mod] = cls\n if func in cls.__class__.__dict__:\n if func not in ifce.__dict__:\n raise sugar.lib.exceptions.SugarLoaderException(\n \"Access denied to function '{}'\".format(func))\n else:\n raise sugar.lib.exceptions.SugarLoaderException(\n \"Function '{}' not found in module '{}'\".format(func, mod))\n _func_or_data = getattr(cls, func)\n\n if post_call:\n result = _func_or_data(*args, **kwargs)\n cls.scheme[func].validate(result)\n else:\n\n def defer_to_call(*args, **kwargs):\n \"\"\"\n Defer bound method for a post-call for validation.\n\n :param args: generic arguments\n :param kwargs: generic keywords\n :return: generic object\n \"\"\"\n data = _func_or_data(*args, **kwargs)\n cls.scheme[func].validate(data)\n return data\n result = defer_to_call\n\n return result",
"def get_handler(self, content_name):\n import django_userhistory.handlers as handlers\n \n def to_studly(x):\n return \"\".join([token.capitalize() for token in x.split(\"_\")])\n \n handler_class = getattr(handlers, \n \"%sHandler\" % (to_studly(content_name)), \n handlers.BaseUserHistoryHandler)\n return handler_class",
"def get_callable_from_string(f_name):\n try:\n mod_name, func_name = get_mod_func(f_name)\n if mod_name == \"\" and func_name == \"\":\n raise AttributeError(\"%s couldn't be converted to a module or function name\" % f_name)\n\n module = __import__(mod_name)\n\n if func_name == \"\":\n func_name = mod_name # The common case is an eponymous class\n\n return getattr(module, func_name)\n\n except (ImportError, AttributeError), exc:\n raise RuntimeError(\"Unable to create a callable object for '%s': %s\" % (f_name, exc))",
"def get_module_command_handler(self, name: str) -> Callable:\n if self.module is None:\n return\n cmnd = getattr(self.module, name, None)\n if cmnd is None or not (callable(cmnd) and hasattr(cmnd, FILEBASE_API_API_METHOD_MARKER_ATTRIB_NAME)):\n return None\n return cmnd",
"def handler(req):\n name = gethandlername(req.uri)\n if name == \"dispatcher\":\n raise404(\"Can't display the dispatcher\")\n handlerfunc = gethandlerfunc(name)\n return handlerfunc(req)",
"def lookup_func_from_fp(fp):\n return lookup_func(fp['m_funcId'])",
"def exposed_getmodule(self, name):\n return __import__(name, None, None, \"*\")",
"def import_from(full_name):\n module_name, function_name = full_name.rsplit('.', 1)\n mod = import_module(module_name)\n return getattr(mod, function_name)",
"def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):\n raise NotImplementedError(\"Runtime needs to provide handler_url()\")",
"def get_func(func_name):\n if func_name == '':\n return None\n try:\n parts = func_name.split('.')\n # Refers to a function in this module\n if len(parts) == 1:\n return globals()[parts[0]]\n # Otherwise, assume we're referencing a module under modeling\n module_name = 'modeling.' + '.'.join(parts[:-1])\n module = importlib.import_module(module_name)\n return getattr(module, parts[-1])\n except Exception:\n raise",
"def get_function(name):\n \n # Check if already a function\n if callable(name):\n return name\n \n if not isinstance(name, str):\n raise ValueError(f'{name} must be callable or a string.')\n \n if name in globals(): \n if callable(globals()[name]):\n f = globals()[name]\n else:\n raise ValueError(f'global {name} is not callable')\n else:\n # try to import\n m_name, f_name = name.rsplit('.', 1)\n module = importlib.import_module(m_name)\n f = getattr(module, f_name)\n \n return f",
"def component_handler(request, usage_key_string, handler, suffix=''):\r\n\r\n usage_key = UsageKey.from_string(usage_key_string)\r\n\r\n descriptor = get_modulestore(usage_key).get_item(usage_key)\r\n # Let the module handle the AJAX\r\n req = django_to_webob_request(request)\r\n\r\n try:\r\n resp = descriptor.handle(handler, req, suffix)\r\n\r\n except NoSuchHandlerError:\r\n log.info(\"XBlock %s attempted to access missing handler %r\", descriptor, handler, exc_info=True)\r\n raise Http404\r\n\r\n # unintentional update to handle any side effects of handle call; so, request user didn't author\r\n # the change\r\n get_modulestore(usage_key).update_item(descriptor, None)\r\n\r\n return webob_to_django_response(resp)",
"def __extract_module(log):\n module = \"UNKNOWN\"\n if \"module\" in log:\n module = log[\"module\"]\n elif \"executorName\" in log:\n module = log[\"executorName\"]\n elif \"http_uri\" in log:\n module = Transformer.__extract_module_from_url(log[\"http_uri\"])\n if module == \"UNKNOWN\" and \"header_referer\" in log:\n module = Transformer.__extract_module_from_url(log[\"header_referer\"])\n return module",
"def _get_module(module):\n try:\n return sys.modules[module]\n except KeyError:\n raise ValueError(\n module + \"is not a valid module name or it is not loaded\"\n )",
"async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler"
] |
[
"0.703652",
"0.67843664",
"0.5899618",
"0.58730614",
"0.5857758",
"0.5857758",
"0.5857758",
"0.58505243",
"0.58359313",
"0.580863",
"0.5804542",
"0.57574105",
"0.5730287",
"0.5711653",
"0.57108694",
"0.5693852",
"0.5678432",
"0.5673243",
"0.5655031",
"0.5647152",
"0.56082803",
"0.5568447",
"0.54840887",
"0.545601",
"0.54309744",
"0.541543",
"0.54111224",
"0.5399132",
"0.539386",
"0.53381044"
] |
0.78308606
|
0
|
Given a URL, find the handler module name
|
def gethandlername(URL):
    match = re.search("/([a-zA-Z0-9_-]+)\.prog($|/|\?)", URL)
    if not match:
        # Couldn't find the requested module
        raise404("Couldn't find a module name in URL " + URL)
    return match.group(1)
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def find_handler(url):\n for handler in __all__:\n # Get the symbol for handler\n mod = globals()[handler]\n # Ask handler if it can handle the url\n if getattr(mod, \"can_handle\")(url):\n return mod\n return None",
"def __extract_module_from_url(url):\n modules_dict = {\n \"ADVERTISING\": [\"ADVERTISING\", \"ADV\", \"advertising\"],\n \"ADVOCACY\": [\"ADVOCACY\", \"advocacy\"],\n \"AUTOMATION\": [\"AUT\", \"automation\"],\n \"BENCHMARKING\": [\"BENCHMARKING\", \"BMK\", \"benchmarking\"],\n \"CARE\": [\"CARE\", \"care\"],\n \"CASE_MANAGEMENT\": [\"UNIVERSAL_CASE\", \"universal_case\"],\n \"COMMENT\": [\"COMMENT\", \"comment\"],\n \"ENGAGEMENT\": [\"ENGAGEMENT\", \"ENG\", \"engagement\"],\n \"GOVERNANCE\": [\"GOVERNANCE\", \"GOV\", \"governance\"],\n \"INBOUND_MESSAGE\": [\"INBOUND_MESSAGE\", \"inbound_message\"],\n \"LISTENING\": [\"LISTENING\", \"LST\", \"listening\"],\n \"MARKETING\": [\"MARKETING\", \"MKT\", \"marketing\"],\n \"METADATA\": [\"METADATA\", \"metadata\"],\n \"META_CONTENT\": [\"META_CONTENT\", \"meta_content\"],\n \"OUTBOUND\": [\"OUTBOUND\", \"outbound\"],\n \"OUTBOUND-STREAM-FEED\": [\"OUTBOUND-STREAM-FEED\", \"outbound-stream-feed\"],\n \"OUTBOUND_MESSAGE\": [\"OUTBOUND_MESSAGE\", \"outbound_message\"],\n \"PAID\": [\"PAID\", \"paid\"],\n \"PLATFORM\": [\"PLATFORM\", \"platform\"],\n \"PUBLISHING\": [\"PUBLISHING\", \"PUB\", \"publishing\"],\n \"RDB_FIREHOSE\": [\"RDB_FIREHOSE\", \"rdb_firehose\"],\n \"REPORTING\": [\"REPORTING\", \"reporting\"],\n \"RESEARCH\": [\"RESEARCH\", \"research\"],\n \"SAM\": [\"SAM\", \"/sam/\"],\n \"SOCIAL\": [\"SOCIAL\", \"social\"],\n \"spellcheck-grammar\": [\"spellcheck\", \"grammar\"],\n \"SPR_TASK\": [\"SPR_TASK\", \"spr_task\"],\n \"SUGGESTION\": [\"SUGGESTION\", \"suggestion\"],\n \"UGC\": [\"UGC\", \"ugc\"],\n }\n matching_module = [\n mod\n for mod in modules_dict\n if any(keyword in url for keyword in modules_dict[mod])\n ]\n if (len(matching_module)) == 1:\n return matching_module[0]\n\n else:\n return \"UNKNOWN\"",
"def gethandlerfunc(modname):\n try:\n # Import the module\n mod = __import__(modname)\n except ImportError:\n # No module with this name\n raise404(\"Couldn't import module \" + modname)\n\n try:\n # Find the handler function\n handler = mod.handler\n except AttributeError:\n # No handler function\n raise404(\"Couldn't find handler function in module \" + modname)\n\n if not callable(handler):\n # It's not a function\n raise404(\"Handler is not callable in module \" + modname)\n\n return handler",
"def __extract_module(log):\n module = \"UNKNOWN\"\n if \"module\" in log:\n module = log[\"module\"]\n elif \"executorName\" in log:\n module = log[\"executorName\"]\n elif \"http_uri\" in log:\n module = Transformer.__extract_module_from_url(log[\"http_uri\"])\n if module == \"UNKNOWN\" and \"header_referer\" in log:\n module = Transformer.__extract_module_from_url(log[\"header_referer\"])\n return module",
"def getmodulename(path):\r\n info = getmoduleinfo(path)\r\n if info: return info[0]",
"def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]",
"def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]",
"def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))",
"def url_module(self):\n if self.translated_view_urls:\n module = self._return_module(self.translated_view_urls)\n if module is None:\n logging.warning(\n \"{plugin} defined {urls} translated view urls but the module was not found\".format(\n plugin=self.module_path, urls=self.translated_view_urls\n )\n )\n return module",
"def extract_instance_name(url):\n return url.rsplit('/', 1)[-1]",
"def get_module_name_from_entry_point(entry_point):\n if 'module_name' in dir(entry_point):\n return entry_point.module_name\n elif 'module' in dir(entry_point):\n return entry_point.module\n\n raise exception.SysinvException(_(\n \"Module name for entry point {} \"\n \"could not be determined.\".format(entry_point)))",
"def get_handler(self, content_name):\n import django_userhistory.handlers as handlers\n \n def to_studly(x):\n return \"\".join([token.capitalize() for token in x.split(\"_\")])\n \n handler_class = getattr(handlers, \n \"%sHandler\" % (to_studly(content_name)), \n handlers.BaseUserHistoryHandler)\n return handler_class",
"def _get_table_name(url):\n try:\n return urlparse(url).path.strip('/').split('/')[1]\n except IndexError:\n return None",
"def _find_url_handler(self, req):\n # First try - lookup in explicit (non parameterized URLs)\n if req.path in self.explicit_url_map:\n return self.explicit_url_map[req.path]\n # Second try - strip last path segment and lookup in another map\n idx = req.path.rfind(b'/') + 1\n path2 = req.path[:idx]\n if len(path2) > 0 and path2 in self.parameterized_url_map:\n # Save parameter into request\n req._param = req.path[idx:].decode()\n return self.parameterized_url_map[path2]\n\n if self.catch_all_handler:\n return self.catch_all_handler\n\n # No handler found\n return (None, None)",
"def get_ext(url):\r\n root, ext = splitext(url)\r\n return ext",
"def lookupmodule(self, filename):\n if os.path.isabs(filename) and os.path.exists(filename):\n return filename\n f = os.path.join(sys.path[0], filename)\n if os.path.exists(f) and self.canonic(f) == self.mainpyfile:\n return f\n root, ext = os.path.splitext(filename)\n if ext == '':\n filename = filename + '.py'\n if os.path.isabs(filename):\n return filename\n for dirname in sys.path:\n while os.path.islink(dirname):\n dirname = os.readlink(dirname)\n fullname = os.path.join(dirname, filename)\n if os.path.exists(fullname):\n return fullname\n return None",
"def extract_api_name(url):\n host = RE_HOST.sub('\\\\1', url)\n return host",
"def get_info_of_url(url):\n pass",
"def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None",
"def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]",
"def findModule(name):",
"def package_name_from_url(url):\n\n url_repo_part = url.split('/')[-1]\n\n if url_repo_part.endswith('.git'):\n return url_repo_part[:-4]\n\n return url_repo_part",
"def api_url_module(self):\n if self.untranslated_view_urls:\n module = self._return_module(self.untranslated_view_urls)\n if module is None:\n logging.warning(\n \"{plugin} defined {urls} untranslated view urls but the module was not found\".format(\n plugin=self.module_path, urls=self.untranslated_view_urls\n )\n )\n return module",
"def get_host_name(url):\n return urlparse.urlparse(url)[1]",
"def get_ext(url):\n\n path = urlparse(url).path\n ext = splitext(path)[1]\n return ext",
"def get_module_name(self):\n return self.module_name",
"def root_url_module(self):\n if self.root_view_urls:\n module = self._return_module(self.root_view_urls)\n if module is None:\n logging.warning(\n \"{plugin} defined {urls} root view urls but the module was not found\".format(\n plugin=self.module_path, urls=self.root_view_urls\n )\n )\n return module",
"def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None",
"def uri_dispatch(uri):\n\n return uri_dispatch_map[os.path.splitext(uri)[1]]",
"def module_name(cls):\n return __name__.split(\".\")[0]"
] |
[
"0.7329028",
"0.6799866",
"0.6571339",
"0.65176964",
"0.64922774",
"0.62385327",
"0.62385327",
"0.61173797",
"0.5958094",
"0.58933336",
"0.5882599",
"0.5876819",
"0.5803288",
"0.58031505",
"0.5779282",
"0.57589215",
"0.5755825",
"0.57234514",
"0.57199085",
"0.5707681",
"0.5684649",
"0.5671889",
"0.56678003",
"0.5654658",
"0.56374586",
"0.561759",
"0.5601703",
"0.5600467",
"0.5596535",
"0.5592026"
] |
0.835686
|
0
|
Determine the utilization rate of the aggregate prefix and return it as a percentage.
|
def get_utilization(self):
    child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))
    # Remove overlapping prefixes from list of children
    networks = cidr_merge([c.prefix for c in child_prefixes])
    children_size = float(0)
    for p in networks:
        children_size += p.size
    return int(children_size / self.prefix.size * 100)
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret",
"def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0",
"def gc_rate(dna: str, percent=False):\n c = Counter(dna)\n result = (c[\"G\"] + c[\"C\"]) / len(dna)\n return result * 100 if percent else result",
"def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"",
"def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0",
"def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0",
"def percentage(count, total):\n return count / total * 100",
"def _get_cpu_percent(self):\n cpu_delta = None\n total_delta = None\n cpu_usage = 0\n try:\n cpu_usage2_time = time.time()\n cpu_usage2_usec = self._get_cgroups_cpu_usage_snapshot()\n if cpu_usage2_usec and self._cpu_usage1_usec:\n # elapsed cpu time our cgroup consumed in time period between measurements\n cpu_delta = cpu_usage2_usec - self._cpu_usage1_usec\n if self._cpu_usage1_time:\n time_delta = cpu_usage2_time - self._cpu_usage1_time\n # max possible cpu usage per one second adjusted to elapsed time between measurements\n total_delta = self._max_cpu_usage * time_delta\n if cpu_delta and total_delta:\n cpu_usage = round((cpu_delta / total_delta) * 100, 1)\n self._cpu_usage1_usec = cpu_usage2_usec\n self._cpu_usage1_time = cpu_usage2_time\n except BaseException:\n self._log.warning(f'Unable to determine cpu usage', exc_info=True)\n return cpu_usage",
"def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0",
"def unit_of_measurement(self):\n return \"%\"",
"def dnsPrefixFraction(bundle, key, subkey, thisone, args):\n if 'psldom' in thisone: # only process ones that are domains\n prefix_unique_count = lookup(bundle, key, subkey, thisone, [key, 'unique', 'pslpre_unique'])\n if prefix_unique_count:\n return(float(prefix_unique_count) / float(thisone['psldom']))",
"def unit_of_measurement(self) -> Any:\n return PERCENTAGE",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def percent_rating(value):\n value = Decimal(value)\n value = round(value / 3, 2) * 100\n return value",
"def rate_limit_percentage(self) -> Optional[float]:\n return pulumi.get(self, \"rate_limit_percentage\")",
"def percent_of(part, whole):\n return part * 100 / whole",
"def get_cpu_percent():\n return psutil.cpu_percent(interval=1, percpu=True)",
"def tax_rate(self) -> float:\n return round((self.total / self.income) * 100, 2)",
"def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")",
"def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )",
"def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)",
"def completion_proximity_score(prefix, completion):\n if prefix == completion:\n return float(\"inf\")\n else:\n return 1.0 / float(len(completion))",
"def unit_of_measurement(self):\n return '%'",
"def ok(self, results):\n return \"{:5.2f}% capacity used\".format(\n results[\"usage\"].resource.usage_ratio * 100.0\n )"
] |
[
"0.62969273",
"0.6216252",
"0.61805874",
"0.6167256",
"0.6159693",
"0.6150952",
"0.61206144",
"0.61026776",
"0.60304207",
"0.60304207",
"0.60299826",
"0.60109663",
"0.5991576",
"0.5987215",
"0.5963927",
"0.5933034",
"0.5900829",
"0.5885009",
"0.5875576",
"0.58688074",
"0.58314836",
"0.5802643",
"0.57955515",
"0.5782507",
"0.5782343",
"0.576701",
"0.57535034",
"0.575304",
"0.57475615",
"0.5745958"
] |
0.67332476
|
0
|
Iterate through a QuerySet of Prefixes and annotate the hierarchical level of each. While it would be preferable to do this using .extra() on the QuerySet to count the unique parents of each prefix, that approach introduces performance issues at scale. Because we're adding a nonfield attribute to the model, annotation must be made after any QuerySet modifications.
|
def annotate_depth(self, limit=None):
    queryset = self
    stack = []
    for p in queryset:
        try:
            prev_p = stack[-1]
        except IndexError:
            prev_p = None
        if prev_p is not None:
            while (p.prefix not in prev_p.prefix) or p.prefix == prev_p.prefix:
                stack.pop()
                try:
                    prev_p = stack[-1]
                except IndexError:
                    prev_p = None
                    break
        if prev_p is not None:
            prev_p.has_children = True
        stack.append(p)
        p.depth = len(stack) - 1
    if limit is None:
        return queryset
    return filter(lambda p: p.depth <= limit, queryset)
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def rebuild_prefixes(vrf):\n def contains(parent, child):\n return child in parent and child != parent\n\n def push_to_stack(prefix):\n # Increment child count on parent nodes\n for n in stack:\n n['children'] += 1\n stack.append({\n 'pk': [prefix['pk']],\n 'prefix': prefix['prefix'],\n 'children': 0,\n })\n\n stack = []\n update_queue = []\n prefixes = Prefix.objects.filter(vrf=vrf).values('pk', 'prefix')\n\n # Iterate through all Prefixes in the VRF, growing and shrinking the stack as we go\n for i, p in enumerate(prefixes):\n\n # Grow the stack if this is a child of the most recent prefix\n if not stack or contains(stack[-1]['prefix'], p['prefix']):\n push_to_stack(p)\n\n # Handle duplicate prefixes\n elif stack[-1]['prefix'] == p['prefix']:\n stack[-1]['pk'].append(p['pk'])\n\n # If this is a sibling or parent of the most recent prefix, pop nodes from the\n # stack until we reach a parent prefix (or the root)\n else:\n while stack and not contains(stack[-1]['prefix'], p['prefix']):\n node = stack.pop()\n for pk in node['pk']:\n update_queue.append(\n Prefix(pk=pk, _depth=len(stack), _children=node['children'])\n )\n push_to_stack(p)\n\n # Flush the update queue once it reaches 100 Prefixes\n if len(update_queue) >= 100:\n Prefix.objects.bulk_update(update_queue, ['_depth', '_children'])\n update_queue = []\n\n # Clear out any prefixes remaining in the stack\n while stack:\n node = stack.pop()\n for pk in node['pk']:\n update_queue.append(\n Prefix(pk=pk, _depth=len(stack), _children=node['children'])\n )\n\n # Final flush of any remaining Prefixes\n Prefix.objects.bulk_update(update_queue, ['_depth', '_children'])",
"def add_sister_prefixes_helper(a, ephrases, enode, i):\n\n j = i+enode.length\n if logger.level >= 3:\n logger.write(\"(i,j) = %s\\n\" % ((i,j),))\n x = enode.label\n j1 = i\n for ci in range(len(enode.children)):\n child = enode.children[ci]\n j1 += child.length\n if logger.level >= 3:\n logger.write(\"(i,j1) = %s\\n\" % ((i,j1),))\n if j1 < j and (i,j1) in ephrases:\n\n # constprefix3:\n #x1 = sym.fromtag(\"%s*\" % x)\n\n # subcat-lr2:\n #subcat = [sister.label for sister in enode.children[ci+1:] if sister.required]\n #x1 = sym.fromtag(\"/\".join([\"%s*\"%x]+subcat))\n \n # markov1:\n x1 = sym.fromtag(\"%s/%s\" % (x, enode.children[ci+1].label))\n\n # markov2:\n #x1 = sym.fromtag(\"%s(%s)\" % (x, enode.children[ci].label))\n \n a.espans.setdefault((i,j1),[]).append(x1)\n prefix_labels.add(x1)\n \n for child in enode.children:\n add_sister_prefixes_helper(a, ephrases, child, i)\n i += child.length",
"def insert(self, prefix: str):\n leaf = self.root\n for level in range(len(prefix)):\n letter = prefix[level]\n\n # if current character is not present\n if letter not in leaf.children:\n leaf.children[letter] = self.get_node()\n leaf = leaf.children[letter]\n\n # mark last node as leaf\n leaf.word_count += 1",
"def addPrefix(self):\n closeRqd = None\n for item, next in map(None, self, self[1:]):\n lastSibling = not next or next.level != item.level\n if item.prefix and item.textLines and closeRqd == None:\n item.textLines[0] = item.prefix + item.textLines[0]\n closeRqd = item.suffix\n if closeRqd != None and (lastSibling or\n not item.equalPrefix(next)):\n if item.textLines:\n item.textLines[-1] = item.textLines[-1] + closeRqd\n else:\n item.textLines = [closeRqd]\n closeRqd = None",
"def _addPrefixes(data):\n prevTags = None\n newData = []\n\n for n, (token, tags) in enumerate(data):\n\n newTags = []\n\n for t in tags:\n p = \"B\" if ((prevTags is None) or (t not in prevTags)) else \"I\"\n newTags.append(\"%s-%s\" % (p, t))\n\n newData.append((token, newTags))\n prevTags = tags\n\n return newData",
"def _get_prefixes(self):\n return self._dispatch_json(\"get\", self._db_base(\"prefixes\")).get(\"@context\")",
"def prefix(files):\n\tfrom os import sep\n\t\n\t# Initializes counters\n\tcounters = []\n\t\n\t# For all files\n\tfor file in files:\n\t\tfile = normalizePath(file)\n\t\t\n\t\t# Split the file name into a piece\n\t\tpaths = file.split(sep)\n\t\t\n\t\t# For each piece\n\t\tfor i in range(0,len(paths)):\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\t# Test if counters exist\n\t\t\t\t\tcounters[i][paths[i]] += 1\n\t\t\t\texcept:\n\t\t\t\t\t# Creates a path counters\n\t\t\t\t\tcounters[i][paths[i]] = 1\n\t\t\texcept:\n\t\t\t\t# Adds a new level of depth\n\t\t\t\tcounters.append({paths[i] : 1})\n\t\n\t# Constructs the prefix of the list of files\n\ttry:\n\t\tresult = \"\"\n\t\tamount = list(counters[0].values())[0]\n\t\tfor counter in counters:\n\t\t\tif len(counter.keys()) == 1 and list(counter.values())[0] == amount:\n\t\t\t\tresult += list(counter.keys())[0] + sep\n\t\t\telse:\n\t\t\t\treturn result [:-1]\n\t\t\t\tbreak\n\t\treturn result\n\texcept IndexError:\n\t\treturn \"\"",
"def __wordsToPrefixes__(self):\n prefixes = defaultdict(int)\n for word, tag in self.getWordTagDict():\n for prefix in self.getPrefixesForWord(word):\n prefixes[(prefix, tag)] += 1\n return prefixes",
"def _prefixes(self, title, filter_stopwords=False):\n for word in self._clean_words(title, filter_stopwords=filter_stopwords):\n prefixer = partial(word.__getslice__, 0)\n for prefix in imap(prefixer, range(1, len(word) + 1)):\n yield prefix",
"def _parents(self, prefix):\n if self.inherit:\n suffix = self.inherit.name\n value = self.tracconfig.get(\n self.section, '%s.%s' % (prefix, suffix), default=None)\n if value:\n return self._parents_to_list(value)\n return None",
"def prefixer(prefix: str):\n def prefixed(\n node,\n dumps=lambda node: codenode.dumps(node),\n ):\n for line_content in dumps(node).splitlines():\n yield codenode.line(f'{prefix}{line_content}')\n return prefixed",
"def _analyze(node: dict, depth=0, info=defaultdict(int)):\n info[\"depth\"] = max(info[\"depth\"], depth)\n for key in node.keys():\n if key == ITEMSKEY:\n info[\"georecord_containers\"] += 1\n info[\"georecord_items\"] += len(node[key])\n elif key == SUFFIXKEY:\n info[\"suffix_containers\"] += 1\n info[\"suffix_items\"] += len(node[key])\n else:\n info[\"prefix_nodes\"] += 1\n _analyze(node[key], depth + 1, info)\n return info",
"def trie_recurse(wordinds, charinds, prefix, probs, cumul, trie, model, new_inp):\n num = 0\n for let in charinds.keys():\n new_inp[0][-1] = eye[charinds[let]]\n keys = trie.keys(prefix+let)\n num = len(trie.keys(prefix+let))\n if num == 1:\n final_probs[0][wordinds[keys[0]]] = np.multiply(cumul, probs[0][charinds[let]])\n elif num > 1:\n probs = model.predict(new_inp)\n new_inp = np.roll(new_inp, -1, 1)\n \n cumul = np.multiply(cumul, probs[0][charinds[let]])\n trie_recurse(wordinds, charinds, prefix+let, probs, cumul, trie, model, new_inp)",
"def prefixSearch(self, prefix: str, _prec=\"\"):\n if prefix == \"\":\n # prefix exhasuted, match all\n yield from self.keys(_prec)\n else:\n try:\n # prefix not exhausted, traverse further\n chld = self.children[prefix[0]]\n yield from chld.prefixSearch(prefix[1:], _prec + self.ch)\n except IndexError:\n yield None\n except KeyError:\n yield None",
"def joinPrefixItems(self):\n newList = []\n mergeList = OutputGroup()\n for item in self:\n if mergeList and (item.level != mergeList[0].level or\n not item.prefix or\n not item.equalPrefix(mergeList[0])):\n mergeList.mergeGroup()\n newList.append(mergeList[0])\n mergeList[:] = []\n mergeList.append(item)\n if mergeList:\n mergeList.mergeGroup()\n newList.append(mergeList[0])\n self[:] = newList",
"def prefixer_iter(prefix: str):\n def prefixed(\n node,\n dump_iter=lambda node: codenode.default_writer_type(node).dump_iter()\n ):\n for line_content in yield_lines(dump_iter(node)):\n yield codenode.line(f'{prefix}{line_content}')\n return prefixed",
"def prefixes(self):\n # a new OntCuries-like object that wraps NamespaceManager\n # and can leverage its trie\n self.namespace_manager\n raise NotImplementedError('yet')",
"def prefix():\r\n class TableA(tables.Table):\r\n name = tables.Column()\r\n\r\n class Meta:\r\n prefix = \"x\"\r\n\r\n assert \"x\" == TableA([]).prefix\r\n\r\n class TableB(tables.Table):\r\n name = tables.Column()\r\n\r\n assert \"\" == TableB([]).prefix\r\n assert \"x\" == TableB([], prefix=\"x\").prefix\r\n\r\n table = TableB([])\r\n table.prefix = \"x\"\r\n assert \"x\" == table.prefix",
"async def process_prefix_list(\n guild: disnake.Guild,\n ctx: commands.Context = None,\n inter: AppCmdInter = None,\n allowed_mentions=None,\n):\n await create_guild_model(guild)\n guild = await Guild.get(guild.id)\n msg = f\"The following are the custom prefixes for {guild.name}:\\n\" + \", \".join(\n guild.prefixes\n )\n await send_message(msg=msg, ctx=ctx, inter=inter, allowed_mentions=allowed_mentions)",
"def startsWith(self, prefix):\n level = self.trie\n for c in prefix:\n if c in level:\n level = level[c]\n else:\n return False\n return True",
"def enumerate_match(self, prefix: List[str]) -> List[str]:\n matched_terms = []\n cur = self._root\n for i, token in enumerate(prefix):\n if token not in cur.children:\n break\n cur = cur.children[token]\n if cur.is_term:\n item = \"\".join(prefix[:i+1])\n if item in self._masked_items:\n continue\n else:\n matched_terms.append(item)\n\n return matched_terms",
"def get_all(root: TrieNode, prefix: str):\n \"\"\" Retorna uma lista IDs de cursos com o prefixo \"\"\"\n node = root\n found = []\n prefix = prefix.upper()\n\n # Se a raíz não tem filhos, a árvore é vazia\n if not root.children:\n return found\n\n # se não, busca cada caractere do prefixo \n for char in prefix:\n char_not_found = True\n\n # se o usuário colocar um asterisco, sinaliza qualquer palavra com o prefixo\n if char == '*': \n break\n else:\n # busca nas childs do nodo atual\n for child in node.children:\n if child.char == char:\n # se encontrar, atualiza a flag\n char_not_found = False\n # e recomeça do nodo que encontrou\n node = child\n break\n\n # se não encontrou algum caractere\n if char_not_found:\n return found\n\n # se encontrou todas as letras ou um *, pega todas as palavras\n return find_words(node)",
"def add_requested_prefixes(parent, prefix_list, show_available=True, show_assigned=True):\n child_prefixes = []\n\n # Add available prefixes to the table if requested\n if prefix_list and show_available:\n\n # Find all unallocated space, add fake Prefix objects to child_prefixes.\n available_prefixes = netaddr.IPSet(parent) ^ netaddr.IPSet([p.prefix for p in prefix_list])\n available_prefixes = [Prefix(prefix=p, status=None) for p in available_prefixes.iter_cidrs()]\n child_prefixes = child_prefixes + available_prefixes\n\n # Add assigned prefixes to the table if requested\n if prefix_list and show_assigned:\n child_prefixes = child_prefixes + list(prefix_list)\n\n # Sort child prefixes after additions\n child_prefixes.sort(key=lambda p: p.prefix)\n\n return child_prefixes",
"def build_prefix(self):\r\n pattern = self.pattern\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def entity_prefix(self):",
"def setPrefixes(self, p):\n return self._set(prefixes=p)",
"def _get_object_properties(self):\n super()._get_object_properties()\n add_prefix(root=self.root, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)",
"def prefix_freq(self, query_str):\n # if query input is empty, return all strings\n if query_str == '':\n return len(self.text)\n found = self.search_prefix(query_str)\n # if query is found, go to that node\n if found:\n node = self.saved_node\n # extract relevant count that had been performed during insertion of words and traversal of nodes\n count = node.prefix_count\n else:\n return 0\n return count",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def _prefix_sum(self, i: int) -> int:\n pref_sum = 0\n while i > 0:\n pref_sum += self.tree[i]\n i &= ~self._lsb(i) # Equivalent to i -= _lsb(i)\n \n return pref_sum"
] |
[
"0.5682173",
"0.5620707",
"0.52825505",
"0.51227534",
"0.5075723",
"0.50714594",
"0.5029576",
"0.5010874",
"0.49602807",
"0.49544492",
"0.49525705",
"0.4912911",
"0.49022633",
"0.48917243",
"0.4878467",
"0.4842337",
"0.48361942",
"0.4820922",
"0.48130736",
"0.4796515",
"0.4788806",
"0.47444823",
"0.47205937",
"0.47081736",
"0.4702987",
"0.46971118",
"0.46950912",
"0.46930537",
"0.46918055",
"0.46876556"
] |
0.5933673
|
0
|
Each time a record or the zone is modified, the serial is incremented.
|
def update_serial(self):
    current_date = time.strftime('%Y%m%d', time.localtime())
    if not self.soa_serial:
        self.soa_serial = current_date + '01'
    else:
        serial_date = self.soa_serial[:8]
        serial_num = self.soa_serial[8:]
        if serial_date != current_date:
            self.soa_serial = current_date + '01'
        else:
            serial_num = int(serial_num)
            serial_num += 1
            if serial_num < 10:
                self.soa_serial = current_date + '0' + str(serial_num)
            else:
                self.soa_serial = current_date + str(serial_num)
    self.set_bind_changed(False)
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def post_seqnoincrease(self):",
"def update_soa(record):\n if record and record.domain and record.domain.soa:\n record.domain.soa.serial += 1\n record.domain.soa.dirty = True\n record.domain.soa.save()",
"def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter",
"def serial():\n with commit():\n link_documents_and_serials()\n reindex_pidtype('docid')\n reindex_pidtype('serid')",
"def post_seqnoincrease(self):\n raise",
"def updates_serial(self,request):\n\t\tresult = self._updates_serial.timestamp()\n\t\tMODULE.info(\" -> Serial for UPDATES is '%s'\" % result)\n\t\tself.finished(request.id,result)",
"def _seqno(self):\n self._last_seqno += 1\n return struct.pack(\">L\", self._last_seqno)",
"def update_next_id(cls):\n cls.next_id += 1",
"def get_next_serial(self):\n T = time.gmtime()\n base = T[0] * 10000 + T[1] * 100 + T[2]\n s_base = self.serial // 100\n if s_base < base:\n return base * 100 # New day\n else:\n return self.serial + 1 # May cause future lap",
"def increment_counter(self) -> None:",
"def serial_num(self) -> int:\n return self._serial_num",
"def _get_serial(self):\n with open(self.ca_dir + SERIAL_NAME, 'r+') as serial_file:\n fcntl.flock(serial_file, fcntl.LOCK_EX)\n serial = int(serial_file.read())\n serial_file.seek(0)\n serial_file.truncate()\n serial_file.writelines(['%d'% (serial + 1)])\n return serial",
"def record(self, pos):\n self.lasts += (datetime.now(), pos),\n if len(self.lasts) > 10:\n self.lasts.pop(0)",
"def serial_rfc1912(ts):\n # RFC1912 (http://www.ietf.org/rfc/rfc1912.txt) recommends 'nn' as the\n # revision. However, identifying and incrementing this value is a manual,\n # error prone step. Instead, we save a temporary daily sequence counter.\n serial_prefix = time.strftime('%Y%m%d', ts)\n return serial_prefix + get_revision(serial_prefix, ZONE_SERIAL_COUNTER)",
"def _CreateRecordId(self):\n self._record_count += 1\n return '%s_%s' % (self._unique_id, self._record_count)",
"def save_increment(self):\n self.version = self.next_available_version()\n return self.save()",
"def inc(self):\n \n self.count += 1",
"def before_update(mapper, conn, target):\n if not target.id_:\n dataset = ObjectNumber.parse(target.d_id)\n target.id_ = str(PartitionNumber(dataset, target.sequence_id))",
"def incrementWriteCount(self):\n self.writeCount += 1",
"def inc( self ):\n self.count += 1",
"def save_lastnode_id():\n init_counter()\n\n with FileLock(_COUNTER_FILE):\n with AtomicFile(_COUNTER_FILE, mode=\"w\") as fh:\n fh.write(\"%d\\n\" % _COUNTER)",
"def _init_serial(self):\n index_name = self.ca_dir + '/index.txt'\n serial_name = self.ca_dir + '/serial'\n with open(index_name, 'w'):\n pass\n with open(serial_name, 'w') as serial:\n serial.writelines(['%d' % CA_SERIAL])",
"def increase_counter(self):\n self.values = self.values + 1",
"def id(self):\n _id = super(ScheduleVisit, self).id\n return _id + 1",
"def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True",
"def mint_a_new_cid(self):\n self.update({\"cid\": self.table.c.cid +1}, condition=None)",
"def increment_pc(self):\n self.program_counter[-1] += 1",
"def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()",
"def _save_increment(self):\n self._set_scenefile_properties_from_ui()\n self._update_successful_save(self.scenefile.save_increment())\n self.version_spinbox.setValue(self.scenefile.version)",
"def serialno(self, serialno):\n\n self._serialno = serialno"
] |
[
"0.66880107",
"0.6164748",
"0.60421455",
"0.60311395",
"0.5971428",
"0.58435035",
"0.5806637",
"0.5700466",
"0.5664041",
"0.5663809",
"0.5650433",
"0.5644291",
"0.55386907",
"0.552171",
"0.5513814",
"0.5507396",
"0.547886",
"0.54493624",
"0.54472667",
"0.5414568",
"0.54057187",
"0.54028594",
"0.53918636",
"0.53831166",
"0.5381685",
"0.53785074",
"0.53553754",
"0.53220296",
"0.5306013",
"0.5302127"
] |
0.64695793
|
1
|
By default, PostgreSQL will order INETs with shorter (larger) prefix lengths ahead of those with longer (smaller) masks. This makes no sense when ordering IPs, which should be ordered solely by family and host address. We can use HOST() to extract just the host portion of the address (ignoring its mask), but we must then recast this value to INET() so that records will be ordered properly. We are essentially recasting each IP address as a /32 or /128.
|
def get_queryset(self):
    qs = super(IPAddressManager, self).get_queryset()
    return qs.annotate(host=RawSQL('INET(HOST(ipam_ipaddress.address))', [])).order_by('family', 'host')
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def sort_ip(ip):\n if \".\" in ip:\n return (int(ip.split(\"/\")[1] or \"0\"),\n int(ip.split(\"/\")[0].split(\".\")[0]),\n int(ip.split(\"/\")[0].split(\".\")[1]),\n int(ip.split(\"/\")[0].split(\".\")[2]),\n int(ip.split(\"/\")[0].split(\".\")[3])\n )\n elif \":\" in ip:\n return (int(ip.split(\"/\")[1] or \"0\"),\n int(ip.split(\":\")[0],16),\n int(ip.split(\":\")[1],16),\n int(ip.split(\":\")[2] or \"0\",16)\n )",
"def get_address_parts(ip, mask):\n\n ip_bin_str = address_to_bin(ip).replace('.', '')\n mask_bin_str = address_to_bin(mask).replace('.', '')\n\n net_size = mask_bin_str.rfind('1') + 1\n host_size = ADDR_LEN - net_size\n\n net = _address_from_bin_list(_split_bin_str_on_ocsets(ip_bin_str[:net_size] + '0' * host_size))\n host = _address_from_bin_list(_split_bin_str_on_ocsets('0' * net_size + ip_bin_str[-host_size:]))\n net_count = 2 ** host_size - 2\n count_string = '2^{0}-2'.format(host_size)\n return net, host, net_count, count_string",
"def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]",
"def address_to_ip_prefix(address):\n return address.split('/')",
"def sort_ipv4_list(ip_list, unique=True):\n\tif unique:\n\t\tip_list = list(set(ip_list))\n\tipv4_list = sorted([i.rstrip(':') for i in ip_list], key=lambda ip: (\n\t\tint(ip.split(\".\")[0]),\n\t\tint(ip.split(\".\")[1]),\n\t\tint(ip.split(\".\")[2]),\n\t\tint(ip.split(\".\")[3].split(':')[0]),\n\t\tint(ip.split(\":\")[1]) if \":\" in ip else 0\n\t))\n\treturn ipv4_list",
"def get_ip_list(ip_network, mask=None):\n\tif mask and '/' not in ip_network:\n\t\tnet = ipaddress.ip_network(\"{0}/{1}\".format(ip_network, mask))\n\telif '/' not in ip_network:\n\t\treturn [str(ipaddress.ip_address(ip_network))]\n\telse:\n\t\tnet = ipaddress.ip_network(ip_network)\n\thosts = net.hosts()\n\tif net.netmask == ipaddress.IPv4Address('255.255.255.255') and sys.version_info > (3, 9):\n\t\t# see: https://github.com/zeroSteiner/smoke-zephyr/issues/8\n\t\thosts = []\n\treturn [host.__str__() for host in hosts]",
"def reverse_dns(ipaddress):\n\n name = socket.gethostbyaddr(ipaddress)[0]\n return [str(name)]",
"def list_ip_addresses(data):\n ip_list = [item[0] for item in data]\n sorted_set = sorted(set(ip_list))\n addr_list = [ip for ip in sorted_set]\n return addr_list",
"def get_ip_address(host):\n for h in host:\n ip = h.address['addr']\n return ip",
"def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'",
"def iplst_to_ipaddr(iplst):\n return \".\".join([str(o) for o in iplst])",
"def __cmp__(self, obj): \n # If obj is an Address, stringifying it puts it in a state where it\n # can be parsed by IP().\n other = IP(str(obj))\n\n # Compare IPs by byte representation.\n if self.family == other.family:\n return cmp(self._bytes, other.toBytes())\n else:\n return cmp(self.family, other.family)",
"def ip(self, prefix = None):\n ip = self._update_ips()\n if prefix is None:\n return self.ip_eid\n # we need to check al IPs from self.ip_others that may start with prefix\n p = prefix.split(\"::\")[0]\n for ip in self.ip_others:\n if ip.startswith(p):\n return ip\n return None",
"def normalize_ip4(self):\n\n ip = str(self.ip4)\n # Let's normalize the ip list first\n ip_list = list(\n map(\n lambda v: ipaddress.IPv4Network(v),\n filter(\n lambda v: self.try_convert(v, None, ipaddress.IPv4Network),\n map(\n lambda v: v.split('|')[1].split('/')[0].strip()\n if '|' in v else\n v.split('/')[0].strip(),\n ip.split(',')\n )\n )\n )\n )\n\n if ip_list:\n ip_list.sort()\n ip = tuple(\n int(c)\n for c in str(ip_list[0]).split('/')[0].split('.')\n )\n else:\n ip = (9999, ip)\n\n self.ip4 = ip",
"def host_ip_address(self, host_index, vlan_index):\n if isinstance(vlan_index, tuple):\n vlan_index = vlan_index[0]\n return '10.%u.0.%u/%u' % (vlan_index+1, host_index+1, self.NETPREFIX)",
"def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255",
"def parseHostList( ipstring ):\r\n\r\n # ideally, we should be able to handle these cases:\r\n # w.x.y.z, .x.y.z, .y.z, .z\r\n # w.x.y.a-b, .x.y.a-b, .x.a-b, .a-b\r\n # w.x.y.z-a.b.c.d, w.x.y-a.b.c, w.x-a.b, w-a\r\n # we also need to be able to parse CIDR ranges. Urgh. w.x.y.z/0\r\n \r\n # ...but for the sake of simplicity we'll implement a subset, consisting of these cases:\r\n # 1. w.x.y.z\r\n # 2. w.x.y.z1-zN\r\n # 3. .z1-.zN\r\n\r\n currentNetwork = '0.0.0'\r\n groups = ipstring.split(',') \r\n iplist = []\r\n for i in groups:\r\n\r\n octets = i.split('.')\r\n if len(octets) == 4: # cases 1 and 2\r\n currentNetwork = \"%s.%s.%s\" % (octets[0],octets[1],octets[2])\r\n iprange = getRange(octets[3])\r\n ips = [\"%s.%s\" % (currentNetwork,i) for i in iprange]\r\n\r\n elif len(octets) == 2: # case 3\r\n network = currentNetwork\r\n iprange = getRange(octets[1])\r\n ips = [\"%s.%s\" % (currentNetwork,i) for i in iprange]\r\n \r\n else:\r\n print 'syntax error in specifying host list!'\r\n sys.exit(1)\r\n \r\n iplist += ips\r\n\r\n return uniq(iplist) # get rid of repeats\r",
"def int32_to_ip(int32):\n return str(ipaddress.IPv4Address(int32))",
"def normalize_address(addr: str) -> str:\n # bitcoin hrps\n hrps = {net[\"bech32\"] + \"1\" for net in NETWORKS.values()}\n # liquid hrps\n # Blech32 addresses are intended for confidential assets\n hrps = hrps.union(\n {net[\"blech32\"] + \"1\" for net in NETWORKS.values() if \"blech32\" in net}\n )\n if addr.lower().startswith(tuple(hrps)):\n return addr.lower()\n return addr",
"def enumerate_network(arg):\n\n network = ip_network(arg, strict=False)\n data = list(map(str, network.hosts()))\n data.insert(0, str(network.network_address))\n if network.prefixlen != network.max_prefixlen:\n data.append(str(network.broadcast_address))\n return data",
"def prefixes_ipv4(self):\n with open(self.ixpfx) as f:\n ixpfx = json.load(f)\n return [item['prefix'] for item in ixpfx['data'] if item['protocol'] == 'IPv4']",
"def net_if_addrs():\n ret = []\n for items in cext.net_if_addrs():\n items = list(items)\n items[0] = py2_strencode(items[0])\n ret.append(items)\n return ret",
"def _read_proto_resolve(self, addr: 'bytes', ptype: 'int') -> 'str | IPv4Address | IPv6Address':\n if ptype == Enum_EtherType.Internet_Protocol_version_4: # IPv4\n return ipaddress.ip_address(addr)\n if ptype == Enum_EtherType.Internet_Protocol_version_6: # IPv6\n return ipaddress.ip_address(addr)\n return addr.hex()",
"def host_ip(host):\n return host.cmd('ip addr show {}-eth1 | awk \\'/inet / {{ print $2 }}\\' | cut -d\\'/\\' -f1'.format(host.name, host.name), stdout=sp.PIPE).strip()",
"def sort_result_by_ip(result):\n def sort_func(ip: str):\n try:\n return util.ip_str_to_int(ip)\n except:\n return math.inf\n\n sorted_result = {}\n for k_ip in sorted(result, key=sort_func):\n sorted_result[k_ip] = result[k_ip]\n return sorted_result",
"def get_local_host_ip(self) -> str:",
"def test_IP_to_IPv4(self):\n self.assertEqual(helpers.IP_to_IPv4('00000000000000000000000000000000'), '0.0.0.0')\n self.assertEqual(\n helpers.IPs_to_IPv4s(\n [\n '00000000000000000000000000000000',\n '10001000100110100011111010101001'\n ]\n ),\n ['0.0.0.0', '136.154.62.169']\n )",
"def ip_address(addr):\n parts = addr.split('.')\n if len(parts) != 4:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n for part in parts:\n try:\n num = int(part)\n if num < 0 or num > 255:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n except ValueError:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n return addr",
"def parse_hostmask(hostmask):\n tmp = hostmask.lstrip(':').split('!')\n logger.debug(\"--hostmask--(%s)(%s)(%s)\", hostmask, tmp[0], tmp[1])\n return tmp[0], tmp[1]",
"def get_nets_and_highest_prefix(ip, net_group, db):\n highest_prefix_length = 0\n networks = []\n ip = nacaddr.IP(ip)\n # loop through all the networks in the net_group\n for net in get_nets([net_group], db)[0][1]:\n # find the highest prefix length for the networks that contain the IP\n if ip.version == net.version:\n if ip.subnet_of(net):\n networks.append(str(net))\n if net.prefixlen > highest_prefix_length:\n highest_prefix_length = net.prefixlen\n return highest_prefix_length, networks"
] |
[
"0.6319297",
"0.58167803",
"0.5814441",
"0.5797147",
"0.56700313",
"0.5613662",
"0.5600801",
"0.5575775",
"0.5550464",
"0.55437094",
"0.5528353",
"0.55149114",
"0.54637724",
"0.54330945",
"0.5394444",
"0.5389342",
"0.5371418",
"0.53696537",
"0.5367245",
"0.5361503",
"0.5330726",
"0.52848583",
"0.5271432",
"0.5264234",
"0.524543",
"0.52328473",
"0.52246165",
"0.5223957",
"0.52187693",
"0.5216058"
] |
0.6582377
|
0
|
Autocreate a corresponding A/AAAA DNS record (if possible) whenever the PTR field is modified
|
def update_dns(self):
    if self.ptr:
        which_zone = None
        zones = dns.models.Zone.objects.all()
        for zone in zones:
            if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):
                which_zone = zone
                break
        if which_zone:
            zone_name = which_zone.name
            record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]
            if record_name.endswith('.'):
                record_name = record_name[:-1]
            record_type = 'A' if self.family == 4 else 'AAAA'
            dns.models.Record.objects.get_or_create(
                name=record_name,
                record_type=record_type,
                zone=which_zone,
                address=self
            )
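
# Usage sketch (not part of the original model code; the Address model, the field
# names, and the example values below are assumptions for illustration only):
#
#   addr = Address.objects.get(address='192.0.2.10')  # hypothetical Address instance
#   addr.ptr = 'host.example.com.'                    # modifying the PTR field...
#   addr.update_dns()                                 # ...creates (or finds) an A record
#                                                     # named "host" in the "example.com" zone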
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_dns_atype ( route53_conn, dns_name, atype_value ) :\n r53 = boto.route53.record.ResourceRecordSets( route53_conn, route_53_hosted_zoneid )\n monitor_dns = r53.add_change( 'UPSERT', dns_name, 'A', ttl=60 )\n monitor_dns.add_value( atype_value )\n r53.commit( )",
"def test_updatednsrecord(kasserver, kasapi):\n kasserver.add_dns_record(\"test.example.com\", \"CNAME\", \"www.example2.com\")\n assert kasapi.requests_contains(\"update_dns_settings\")",
"def dns_sync(self, args):\r\n dns = DNSManager(self.client)\r\n vsi = VSManager(self.client)\r\n\r\n vs_id = resolve_id(vsi.resolve_ids, args.get('<identifier>'), 'VS')\r\n instance = vsi.get_instance(vs_id)\r\n zone_id = resolve_id(dns.resolve_ids, instance['domain'], name='zone')\r\n\r\n def sync_a_record():\r\n \"\"\" Sync A record \"\"\"\r\n records = dns.get_records(\r\n zone_id,\r\n host=instance['hostname'],\r\n )\r\n\r\n if not records:\r\n # don't have a record, lets add one to the base zone\r\n dns.create_record(\r\n zone['id'],\r\n instance['hostname'],\r\n 'a',\r\n instance['primaryIpAddress'],\r\n ttl=args['--ttl'])\r\n else:\r\n recs = [x for x in records if x['type'].lower() == 'a']\r\n if len(recs) != 1:\r\n raise CLIAbort(\"Aborting A record sync, found %d \"\r\n \"A record exists!\" % len(recs))\r\n rec = recs[0]\r\n rec['data'] = instance['primaryIpAddress']\r\n rec['ttl'] = args['--ttl']\r\n dns.edit_record(rec)\r\n\r\n def sync_ptr_record():\r\n \"\"\" Sync PTR record \"\"\"\r\n host_rec = instance['primaryIpAddress'].split('.')[-1]\r\n ptr_domains = self.client['Virtual_Guest'].\\\r\n getReverseDomainRecords(id=instance['id'])[0]\r\n edit_ptr = None\r\n for ptr in ptr_domains['resourceRecords']:\r\n if ptr['host'] == host_rec:\r\n ptr['ttl'] = args['--ttl']\r\n edit_ptr = ptr\r\n break\r\n\r\n if edit_ptr:\r\n edit_ptr['data'] = instance['fullyQualifiedDomainName']\r\n dns.edit_record(edit_ptr)\r\n else:\r\n dns.create_record(\r\n ptr_domains['id'],\r\n host_rec,\r\n 'ptr',\r\n instance['fullyQualifiedDomainName'],\r\n ttl=args['--ttl'])\r\n\r\n if not instance['primaryIpAddress']:\r\n raise CLIAbort('No primary IP address associated with this VS')\r\n\r\n zone = dns.get_zone(zone_id)\r\n\r\n go_for_it = args['--really'] or confirm(\r\n \"Attempt to update DNS records for %s\"\r\n % instance['fullyQualifiedDomainName'])\r\n\r\n if not go_for_it:\r\n raise CLIAbort(\"Aborting DNS sync\")\r\n\r\n both = False\r\n if not args['--ptr'] and not args['-a']:\r\n both = True\r\n\r\n if both or args['-a']:\r\n sync_a_record()\r\n\r\n if both or args['--ptr']:\r\n sync_ptr_record()",
"def test_adddnsrecord(kasserver, kasapi):\n kasserver.add_dns_record(\"test1.example.com\", \"CNAME\", \"www.example.com\")\n assert kasapi.requests_contains(\"add_dns_settings\")",
"def create_A_record(self, heroku_host_ip, domain, ttl):\n r = self.api.post_create_record(\n domain = domain,\n name = None,\n record_type = \"A\",\n prio = None,\n content = heroku_host_ip,\n ttl = ttl)\n dns_a_record = self.extract_A_records(r[\"record\"])\n return dns_a_record",
"def dnsUpdate(portId, ipAddr='', action='create'):\n\tzone = 'osdev.skrill.net.'\n\trevZone = '23.32.10.in-addr.arpa'\n\tcname = portId + '.' + zone\n\tttl = 300\n\tnsServer = '10.32.29.99'\n key = 'yw0ADuZjXAhcGgMOYg/Clx1128iUSfhlOHdsY4CzVNIVVVXismrAe+WKMBxocLhbrIVHGvmR94jDC46K18K6oQ=='\n keyRing = dns.tsigkeyring.from_text({zone : key})\n\thostName = genHostname(ipAddr)\n\tdnsUpdate = dns.update.Update(zone, keyring=keyRing)\n\tipAddr = str(ipAddr)\n\thostName = str(hostName)\n\tif action == 'create':\n\t\tdnsUpdate.replace( hostName.split('.')[0], ttl, 'A', ipAddr )\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record updated for: ' + hostName)\n\t\tdnsUpdate.replace(portId, ttl, 'CNAME', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record updated for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n\t\tdnsUpdate.replace(ipAddr.split('.')[3], ttl, 'PTR', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record updated for: ' + hostName)\n\tif action == 'delete':\n\t\ttry:\n\t\t\thostName = dns.resolver.query(cname, 'CNAME')[0].to_text()\n\t\t\tipAddr = dns.resolver.query(hostName, 'A')[0].to_text()\n\t\texcept Exception, e:\n\t\t\tlogging.exception('DNS query failed for cname and A records: ' + cname + ' ' + hostName)\n\t\t\thostName = ''\n\t\t\treturn hostName\n\t\tdnsUpdate.delete(cname, 'CNAME')\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record deleted for: ' + portId + ' to ' + hostName)\n\t\tdnsUpdate.delete(hostName.split('.')[0])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record deleted for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n dnsUpdate.delete(ipAddr.split('.')[3])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record deleted for: ' + hostName)\n\t\treturn hostName",
"def update_A_record(self, heroku_host_ip, dns_a_record):\n r = self.api.post_update_record(\n record_id = dns_a_record.get('id'),\n prio = dns_a_record.get('prio'),\n content = heroku_host_ip,\n ttl = dns_a_record.get('ttl'))\n dns_a_record = self.extract_A_records(r[\"record\"])\n return dns_a_record",
"def create_record(self, context, record):\n record = self.dns_manager.create_record(context, record)\n return record",
"def test_create_domain_with_a_record(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n a_record = [{\n \"type\": \"A\",\n \"name\": \"ftp.example.com\",\n \"data\": \"192.0.2.8\",\n \"ttl\": 3600\n }]\n t['Resources']['domain']['Properties']['records'] = a_record\n instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)\n create_args = self._get_create_args_with_comments(a_record)\n self._stubout_create(instance, fake_dns_instance, **create_args)\n scheduler.TaskRunner(instance.create)()\n self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()",
"def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)",
"def test_update_domain_with_a_record(self):\n a_record = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n self.test_update(updateRecords=a_record)",
"def set_dns_cname ( route53_conn, dns_name, cname_value ) :\n r53 = boto.route53.record.ResourceRecordSets( route53_conn, route_53_hosted_zoneid )\n monitor_dns = r53.add_change( 'UPSERT', dns_name, 'CNAME', ttl=60 )\n monitor_dns.add_value( cname_value )\n r53.commit( )",
"def update_record(self, context, record):\n record = self.dns_manager.update_record(context, record)\n return record",
"def pre_virtual_DNS_record_create(self, resource_dict):\n pass",
"def dns_entry(self, msg):\n if msg['message'].find('Calling getaddrinfo') > -1:\n match = re.search(r'Calling getaddrinfo for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname not in self.dns:\n self.dns[hostname] = {'start': msg['timestamp']}\n elif msg['message'].find('lookup completed for host') > -1:\n match = re.search(r'lookup completed for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname in self.dns and 'end' not in self.dns[hostname]:\n self.dns[hostname]['end'] = msg['timestamp']",
"def upsert_record(route53_zone, record_name, ip):\n\n # Only upsert the dns record if it doesn't resolve to us.\n try:\n record_ip = socket.gethostbyname(record_name)\n except socket.error:\n # Ignore if we can't connect to the host\n pass\n else:\n if ip == record_ip:\n return\n\n print str(dt.now()), \"Registering host as\", record_name\n record = route53_zone.get_a(record_name)\n\n if record and ip not in record.resource_records:\n route53_zone.update_a(record_name, ip)\n elif not record:\n route53_zone.add_a(record_name, ip)",
"def update_type_A_domain(self, domain, point_to):\n r53 = self.connections.get_route53()\n\n # Get Zone ID\n zone = r53.get_zone(self.env.domain)\n zone_id = zone.id\n\n if not zone.get_a(domain):\n sys.exit(\"\\nAbort: {} does not exists! \" \\\n \"Please create first!\".format(domain))\n\n # Commit change\n try:\n changes = ResourceRecordSets(connection=r53, hosted_zone_id=zone_id)\n change = changes.add_change(action='UPSERT', name=domain, type=\"A\")\n change.set_alias(\n alias_hosted_zone_id=zone_id,\n alias_dns_name=point_to,\n alias_evaluate_target_health=False)\n changes.commit()\n except DNSServerError:\n raise\n except Exception:\n print(\"Unexpected error: {}\".format(traceback.format_exc()))\n sys.exit(1)\n\n # Print record set\n record = zone.get_a(domain)\n print(\"\\nUpdated record set is:\\n{}\".format(record.to_print()))",
"def _update_to_group(group, rrsets):\n #logging.debug('parsing DNS UPDATE:\\n\\n\\%s' % rrsets)\n for rrset in rrsets:\n for record in rrset:\n record.rdclass %= dns.rdataclass.UNIQUE #remove cache-flush bit\n\n if record.rdtype not in [dns.rdatatype.PTR, dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.TXT, dns.rdatatype.SRV]:\n logging.warning('Invalid DNS RR type (%s), not adding mDNS record to Avahi' % record.rdtype)\n continue\n\n if record.rdclass != dns.rdataclass.IN:\n logging.warning('Invalid DNS RR class (%s), not adding mDNS record to Avahi' % record.rdclass)\n continue\n\n #if (record.rdtype == dns.rdatatype.PTR and ':' in record.to_digestable()) or record.rdtype == dns.rdatatype.AAAA:\n # continue #ignore IPV6 for now, can't sniff those connections\n\n try:\n group.AddRecord( #http://avahi.sourcearchive.com/documentation/0.6.30-5/avahi-client_2publish_8h_a849f3042580d6c8534cba820644517ac.html#a849f3042580d6c8534cba820644517ac\n IF_UNSPEC, # iface *\n PROTO_UNSPEC, # proto _INET & _INET6\n dbus.UInt32(256), # AvahiPublishFlags (use multicast)\n str(rrset.name).decode('utf-8'), #name\n dbus.UInt16(record.rdclass), #class\n dbus.UInt16(record.rdtype), #type\n dbus.UInt32(rrset.ttl), #ttl\n string_array_to_txt_array([record.to_digestable()])[0] #rdata\n )\n logging.info('added mDNS record to Avahi: %s' % rrset.to_text())\n except UnicodeDecodeError:\n logging.warn('malformed unicode in rdata, skipping: %s' % rrset.to_text())\n except dbus.exceptions.DBusException, e:\n if e.get_dbus_name() == 'org.freedesktop.Avahi.InvalidDomainNameError':\n logging.warning('not mirroring mDNS record with special chars: %s' % rrset.to_text())\n continue # skip this record since Avahi will reject it\n # mac probably sent a device_info PTR with spaces and parentheses in the friendly description\n # per https://tools.ietf.org/html/rfc6763#section-4.1.3\n # fanboy\\032\\(2\\)._eppc._tcp.local. 4500 CLASS32769 TXT \"\" # `fanboy (2)`\n # mDNS.c sends UTF8, dnspythom.from_wire() assumes ASCII, DBUS wants Unicode, Avahi only takes [a-zA-Z0-9.-]\n # http://dbus.freedesktop.org/doc/dbus-python/api/dbus.String-class.html\n # http://dbus.freedesktop.org/doc/dbus-python/api/dbus.UTF8String-class.html\n # http://www.avahi.org/ticket/21 http://avahi.org/ticket/63\n # http://git.0pointer.net/avahi.git/commit/?id=5c22acadcbe5b01d910d75b71e86e06a425172d3\n # http://git.0pointer.net/avahi.git/commit/?id=ee2820a23c6968bbeadbdf510389301dca6bc765\n # http://git.0pointer.net/avahi.git/tree/avahi-common/domain.c\n raise",
"def add_txt_record(self, domain, record_name, record_content, record_ttl):\n\n # check to see if the DNS zone is present in OCI\n\n # first find the domain\n zone_ocid, zone_name = self._find_managed_zone(domain, record_name)\n if zone_name is None:\n raise errors.PluginError(\"Domain not known\")\n logger.debug(\"Found domain %s with OCID %s\", zone_name, zone_ocid)\n\n # NOTE: the OCI SDK will treat:\n # - an addition of the same name + value + TTL as a NO OP\n # - an addition of the same name + value (but different TTL) as an update to the TTL\n # it does NOT throw an error in either case.\n\n logger.debug(\"Setting record %s in zone %s to value %s w/ TTL %d\",\n record_name, zone_ocid, record_content, record_ttl)\n\n result = self.dns_client.patch_domain_records(\n zone_name,\n record_name,\n oci.dns.models.PatchDomainRecordsDetails( items=[ oci.dns.models.RecordOperation(\n operation='ADD',\n domain=record_name,\n ttl=record_ttl,\n rtype='TXT',\n rdata=record_content) ] ) )\n\n logger.debug(\"Update successful.\")\n logger.debug(\"New rrset version: %s\", result.data.items[0].rrset_version)\n\n logger.debug(\"Success\")",
"def add_route53_record(emr_internal_ips, cr):\n\n conn = connect_route53(aws_access_key_id = cr.get_config(\"aws_access_key\"), aws_secret_access_key = cr.get_config(\"aws_secret_key\"))\n\n zone = conn.get_zone(\"alpinenow.local\")\n\n print \"Adding DNS Records for: {0}\".format(emr_internal_ips)\n for ip in emr_internal_ips:\n internal_dns = \"ip-\" + ip.replace(\".\", \"-\") + \".alpinenow.local\"\n response = zone.add_a(internal_dns, ip) # TODO: Do something with response",
"def post_virtual_DNS_record_create(self, resource_dict):\n pass",
"def enable_resource_name_dns_aaaa_record_on_launch(self) -> bool:\n return pulumi.get(self, \"enable_resource_name_dns_aaaa_record_on_launch\")",
"def dns_add(self, full_record_name, record_type, value=None, raw=False, **kwargs):\n\n endpoint = '/Domain/DnsRecord/Add'\n\n params = {\n 'FullRecordName' : full_record_name,\n 'Type': record_type,\n }\n\n params.update(kwargs)\n\n if record_type not in VALID_DNS_RECORD_TYPES:\n raise ValueError(\"Accepted values for this argument are: A, AAAA, DYNAMIC, CNAME, MX, SRV, TXT and NS\")\n\n if not value and record_type != 'DYNAMIC':\n raise ValueError(\"All records except DYNAMIC must have their value\")\n \n if record_type == 'DYNAMIC':\n if not kwargs.has_key('DynDnsLogin') or not kwargs.has_key('DynDnsPassword'):\n raise ValueError('DynDNS login and password are required when record type is DYNAMIC')\n\n if value:\n params['Value'] = value\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response.get('status') == 'SUCCESS'",
"def dns_update(self, full_record_name, record_type, value=None, raw=False, **kwargs):\n\n endpoint = '/Domain/DnsRecord/Update'\n\n params = {\n 'FullRecordName' : full_record_name,\n 'Type': record_type,\n }\n\n params.update(kwargs)\n\n\n if record_type not in VALID_DNS_RECORD_TYPES:\n raise ValueError(\"Accepted values for this argument are: A, AAAA, DYNAMIC, CNAME, MX, SRV, TXT and NS\")\n\n if not value and record_type != 'DYNAMIC':\n raise ValueError(\"All records except DYNAMIC must have their value\")\n \n if record_type == 'DYNAMIC':\n if not kwargs.has_key('DynDnsLogin') or not kwargs.has_key('DynDnsPassword'):\n raise ValueError('DynDNS login and password are required when record type is DYNAMIC')\n\n if value:\n params['Value'] = value\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response.get('status') == 'SUCCESS'",
"def pre_virtual_DNS_record_update(self, resource_id, resource_dict):\n pass",
"def reverse_dns_sna(ipaddress):\n\n r = requests.get(\"http://api.statdns.com/x/%s\" % ipaddress)\n\n if r.status_code == 200:\n names = []\n\n for item in r.json()['answer']:\n name = str(item['rdata']).strip(\".\")\n names.append(name)\n\n return names\n elif r.json()['code'] == 503:\n # NXDOMAIN - no PTR record\n return None",
"def test_update_record_only(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)\n instance.resource_id = 4\n update_records = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n\n mock_client = self.m.CreateMockAnything()\n self.m.StubOutWithMock(instance, 'cloud_dns')\n instance.cloud_dns().AndReturn(mock_client)\n self.m.StubOutWithMock(mock_client, \"get\")\n mock_domain = self.m.CreateMockAnything()\n mock_client.get(fake_dns_instance.resource_id).AndReturn(mock_domain)\n\n # mock_domain.update shouldn't be called in this scenario, so\n # stub it out but don't record a call to it\n self.m.StubOutWithMock(mock_domain, \"update\")\n\n fake_records = list()\n mock_domain.list_records().AndReturn(fake_records)\n mock_domain.add_records([{\n 'comment': None,\n 'priority': None,\n 'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}])\n self.m.ReplayAll()\n\n uprops = dict(instance.properties)\n uprops['records'] = update_records\n ut = rsrc_defn.ResourceDefinition(instance.name,\n instance.type(),\n uprops)\n instance.state_set(instance.CREATE, instance.COMPLETE)\n\n scheduler.TaskRunner(instance.update, ut)()\n self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()",
"def update_dns(self):\n\t\tfor url in self.update_urls:\n\n\t\t\t# Adds protocol if address does not contain it\n\t\t\tif 'http://' not in url: url = 'http://' + url\n\n\t\t\trequest = urllib.urlopen(url)\n\t\t\trequest.close()",
"def create_internal_dns_name ( base_name, name ) :\n name = name + '.internal'\n return create_dns_name( base_name, name )",
"def pre_virtual_DNS_create(self, resource_dict):\n pass"
] |
[
"0.6175719",
"0.6069661",
"0.60505503",
"0.6043105",
"0.60109985",
"0.59973186",
"0.5895656",
"0.5868057",
"0.5690527",
"0.56834763",
"0.56530905",
"0.56523496",
"0.5632768",
"0.5625833",
"0.5624635",
"0.5618774",
"0.5581644",
"0.5570107",
"0.5546243",
"0.5546115",
"0.5522549",
"0.54702604",
"0.5434873",
"0.53946924",
"0.53637886",
"0.53200287",
"0.53148293",
"0.5283449",
"0.5242406",
"0.52368337"
] |
0.71578366
|
0
|
Parses lexicon into valid rewrite rules.
|
def _get_lexicon_rules(lexicon_dir: str) -> _RewriteRuleSet:
  def _read_rule_set(path: str) -> _RewriteRule:
    logging.info(f"reading rewrite rules from '{path}'")
    entries = lexicon_reader.read_lexicon_entries(path)  # might throw IOError.
    for index, entry in entries.items():
      try:
        lexicon_validator.validate(entry)
      except lexicon_validator.InvalidLexiconEntryError as error:
        raise MorphotacticsCompilerError(
            f"Lexicon entry at line {index} of '{path}' is illformed. {error}")
    return lexicon_parser.parse(list(entries.values()))

  paths = sorted(glob.glob(f"{lexicon_dir}/*.tsv"))
  rule_sets = [_read_rule_set(p) for p in paths]
  lexicon = _RewriteRuleSet()
  lexicon.rule.extend(r for rs in rule_sets for r in rs.rule)
  if not lexicon.rule:
    raise MorphotacticsCompilerError("no valid lexicon rewrite rules found.")
  return lexicon
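
# Usage sketch (illustrative only; the directory path below is an assumption,
# not taken from the original sources):
#
#   lexicon = _get_lexicon_rules("path/to/lexicon_tsv_dir")
#   print(len(lexicon.rule))  # number of rewrite rules parsed from all *.tsv files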
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _generate_rules_and_lexicon(self):\n\n # Get a function that will split words into morphemes\n morpheme_splitter = self.morpheme_splitter\n # Get the unique morphemes from the lexicon corpus\n morphemes = {}\n if (self.lexicon_corpus and\n (not self.rules_corpus or\n self.lexicon_corpus.id != self.rules_corpus.id)):\n for form in self.lexicon_corpus.forms:\n new_morphemes = self._extract_morphemes_from_form(form, morpheme_splitter)\n for pos, data in new_morphemes:\n morphemes.setdefault(pos, set()).add(data)\n # Get the pos sequences (and morphemes) from the user-specified ``rules`` string value or else from the \n # words in the rules corpus.\n pos_sequences = set()\n if self.rules:\n for pos_sequence_string in self.rules.split():\n pos_sequence = tuple(morpheme_splitter(pos_sequence_string))\n pos_sequences.add(pos_sequence)\n else:\n for form in self.rules_corpus.forms:\n new_pos_sequences, new_morphemes = form.extract_word_pos_sequences(\n self.unknown_category, morpheme_splitter,\n self.extract_morphemes_from_rules_corpus)\n if new_pos_sequences:\n pos_sequences |= new_pos_sequences\n for pos, data in new_morphemes:\n morphemes.setdefault(pos, set()).add(data)\n pos_sequences = self._filter_invalid_sequences(pos_sequences, morphemes)\n # sort and delistify the rules and lexicon\n pos_sequences = sorted(pos_sequences)\n morphemes = dict([(pos, sorted(data)) for pos, data in morphemes.iteritems()])\n return pos_sequences, morphemes",
"def read_grammar_rules(istream):\n for line in istream:\n line = line.strip()\n if not line:\n continue\n fields = line.split('|||')\n if len(fields) != 3:\n raise ValueError('I expected 3 fields: %s', fields)\n lhs = fields[0].strip()\n\n if lhs[0] == '[':\n lhs = Nonterminal(lhs[1:-1])\n else:\n lhs = Terminal(lhs)\n rhs = fields[1].strip().split()\n new_rhs = []\n for r in rhs:\n if r[0] == '[':\n r = Nonterminal(r[1:-1])\n else:\n r = Terminal(r)\n new_rhs.append(r)\n\n prob = float(fields[2].strip())\n yield Rule(lhs, new_rhs, prob)",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def _get_lexicon_update(self, lexicon):\n\n def merge(lists):\n \"\"\"\n Merge the lists so lists with overlap are joined together\n (i.e. [[1,2], [3,4], [2,5]] --> [[1,2,5], [3,4]])\n from: http://stackoverflow.com/a/9400562\n \"\"\"\n newsets, sets = [set(lst) for lst in lists if lst], []\n while len(sets) != len(newsets):\n sets, newsets = newsets, []\n for aset in sets:\n for eachset in newsets:\n if not aset.isdisjoint(eachset):\n eachset.update(aset)\n break\n else:\n newsets.append(aset)\n return newsets\n\n def get_coreferences(coreferences):\n \"\"\"Decode the SAF coreferences as (node: coreferencing_nodes) pairs\"\"\"\n coref_groups = []\n for a, b in coreferences:\n # take only the heads of each coref group\n coref_groups.append([a[0], b[0]])\n for nodes in merge(coref_groups):\n for node in nodes:\n yield node, nodes\n\n coreferences = dict(get_coreferences(self.saf_article.get('coreferences', [])))\n\n classes = defaultdict(set) # token -> classes\n uris = {}\n for uri, token in self.get_tokens().iteritems():\n if 'pos' not in token: continue # not a word\n uris[int(token['id'])] = uri\n pos = token['pos']\n lemma = token['lemma']\n for lex in lexicon:\n if \"pos\" in lex and lex['pos'] != pos:\n continue\n lemmata = lex['lemma']\n lexclass = lex['lexclass']\n if not isinstance(lemmata, list):\n lemmata = [lemmata]\n for target in lemmata:\n if (target == lemma or target == lemma.lower()\n or (target.endswith(\"*\") and lemma.lower().startswith(target[:-1]))):\n id = int(token['id'])\n for coref in coreferences.get(id, [id]):\n classes[coref].add(lexclass)\n inserts = []\n for id, lexclasses in classes.iteritems():\n if id not in uris:\n continue # coref to different sentence\n uri = str(uris[id]).replace(AMCAT, \":\")\n for lexclass in lexclasses:\n inserts.append('{uri} :lexclass \"{lexclass}\"'.format(**locals()))\n return {\"insert\": \".\\n\".join(inserts)}",
"def __init__(self, lexicon, flags=0):\n\n import sre_parse\n import sre_compile\n from sre_constants import BRANCH, SUBPATTERN\n\n self.lexicon = lexicon\n\n # combine phrases into a compound pattern\n p = []\n s = sre_parse.Pattern()\n s.flags = flags\n for phrase, action in lexicon:\n p.append(sre_parse.SubPattern(s, [\n (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),\n ]))\n\n s.groups = len(p)+1\n p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])\n self.scanner = sre_compile.compile(p, re.MULTILINE)",
"def adding_rules_lexicon(c, gram, grammar, vari):\r\n cont = True # True if the terminal on gram[c] belongs to vari, else False\r\n c += 2 # skip vari and the arrow\r\n while cont:\r\n h = gram[c] # terminal\r\n h = h.replace('\"', \"\") # remove \" \" from edges\r\n tup = (h, )\r\n c += 1\r\n prob = gram[c][1:len(gram[c]) - 1] # get the probability out of the brackets\r\n r = pcfg.PRule(vari, tup, prob) # create a new rule\r\n grammar.add_rule(r)\r\n c += 1 # move one index forward to check:\r\n if c >= len(gram):\r\n return c\r\n cont = (gram[c] == \"|\") # whether vari is still the current variable\r\n c += 1 # move to the next terminal (or forward)\r\n return c-2",
"def _lexer(self, msg: str):\n\n def lexing_algorithm(text):\n # TODO: >= O(n^2) (horrible time complexity). In the interest of\n # making forward progress, optimize this later. Could use a\n # bloom filter to look for matches, then binary search on\n # entities/commands find specific match once possible match is\n # found. Or even just use a hashmap for searching.\n\n # Base case.\n if text == \"\":\n return []\n\n # 1. Parse named entities.\n for entity in self.kb_named_entities:\n if entity.lower() in text.lower():\n pieces = text.lower().split(entity.lower())\n left = pieces[0]\n right = pieces[1]\n if left == text or right == text:\n # Safety measure to prevent '' causing infinite recursion.\n break\n return lexing_algorithm(left) + [entity.strip()] + lexing_algorithm(right)\n\n # 2. Parse unary commands.\n for intent, pattern in self._unary_command_regexes.items():\n sub_msg = re.sub(pattern, 'MARKER', text)\n if sub_msg != text:\n pieces = sub_msg.split('MARKER')\n left = pieces[0]\n right = pieces[1]\n return lexing_algorithm(left) \\\n + [intent] \\\n + lexing_algorithm(right)\n\n # 3. Parse terminal commands.\n for intent, pattern in self._terminal_command_regexes.items():\n sub_msg = re.sub(pattern, 'MARKER', text)\n if sub_msg != text:\n pieces = sub_msg.split('MARKER')\n left = pieces[0]\n right = pieces[1]\n return lexing_algorithm(left) \\\n + [intent] \\\n + lexing_algorithm(right)\n\n # 4. Parse binary commands.\n for intent, pattern in self._binary_command_regexes.items():\n sub_msg = re.sub(pattern, 'MARKER', text)\n if sub_msg != text:\n pieces = sub_msg.split('MARKER')\n left = pieces[0]\n right = pieces[1]\n return lexing_algorithm(left) \\\n + [intent] \\\n + lexing_algorithm(right)\n\n # If no matches, then the word is a stopword.\n return []\n\n return lexing_algorithm(msg)",
"def _parse(self):\n try:\n # parse token stream into abstract syntax tree (AST)\n self._ast = self._rule_container()\n\n except ParseError:\n raise\n\n except Exception as exc:\n raise ParseError(u'Unexpected error: {0}'.format(unicode(exc)))",
"def ParseLrules(raw_lrule_str, new_lsys):\r\n lrules = StrToLrules(raw_lrule_str)\r\n lrule_priority = 0\r\n for a_rule in lrules:\r\n Lrule.objects.create(lsys = new_lsys,\r\n str_in=a_rule[0],\r\n str_out=a_rule[1],\r\n rule_priority=lrule_priority)\r\n lrule_priority += 1",
"def apply_rules(self, token_parse_list):\r\n return token_parse_list",
"def __init__(self, lexicon):\n self._lexicon = set(lexicon)",
"def _parse_rule(self, tokens):\n if self._currently_parsed_declaration is None:\n self.tokenizer.syntax_error(\"Got a rule outside of \"+\n \"a unit declaration.\")\n\n self._check_indentation(tokens[0])\n\n sub_rules = self.tokens_to_sub_rules(tokens[1:])\n\n relevant_dict = None\n if self._currently_parsed_declaration[0] == pu.UnitType.alias:\n relevant_dict = self.alias_definitions\n elif self._currently_parsed_declaration[0] == pu.UnitType.slot:\n relevant_dict = self.slot_definitions\n else: # intent\n relevant_dict = self.intent_definitions\n\n name = self._currently_parsed_declaration[1]\n variation_name = self._currently_parsed_declaration[2].variation_name\n relevant_dict[name].add_rule(sub_rules, variation_name)",
"def process(self,rules):\n for rule in rules:\n r,arg = rule.split('(')\n args = arg[:-1].split(',')\n self.commands.append((r,args))",
"def generate_lighttpd_rules(app_names, error):\n yield 'url.rewrite = ('\n for rule in create_rules(app_names, error):\n yield ' \"^/static/%s/(.*)\" => \"%s/$1\",' % rule\n yield ')'",
"def scan(sentence):\n words = sentence.split()\n lexicons = []\n\n directions = ['north', 'south', 'east','west',\n 'down', 'up', 'left', 'right', 'back'] \n\n verbs = ['go', 'stop', 'kill', 'eat']\n\n stops = ['the', 'in', 'of', 'from', 'at', 'it']\n\n nouns = ['door', 'bear', 'princess', 'cabinet']\n\n for word in words:\n lWord = word.lower()\n if lWord in directions:\n lexicons.append(('direction', word))\n elif lWord in verbs:\n lexicons.append(('verb', word))\n elif lWord in stops:\n lexicons.append(('stop', word))\n elif lWord in nouns:\n lexicons.append(('noun', word))\n elif lWord.isdigit():\n lexicons.append(('number', int(word)))\n else: lexicons.append(('error', word))\n\n return lexicons",
"def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)",
"def _parse_rules(self, model, comp, node):\n parent = node\n formulas = {}\n # Create variables with assignment rules (all except derivatives)\n node = dom_child(parent, 'assignmentRule')\n while node:\n var = self._convert_name(str(node.getAttribute('variable')).strip())\n if var in comp:\n self.log('Parsing assignment rule for <' + str(var) + '>.')\n var = comp[var]\n var.set_rhs(parse_mathml_rhs(\n dom_child(node, 'math'), comp, self))\n else:\n raise SBMLError('Assignment found for unknown parameter: \"'\n + var + '\".')\n node = dom_next(node, 'assignmentRule')\n # Create variables with rate rules (states)\n node = dom_child(parent, 'rateRule')\n while node:\n var = self._convert_name(str(node.getAttribute('variable')).strip())\n if var in comp:\n self.log('Parsing rate rule for <' + var + '>.')\n var = comp[var]\n ini = var.rhs()\n ini = ini.eval() if ini else 0\n var.promote(ini)\n var.set_rhs(parse_mathml_rhs(\n dom_child(node, 'math'), comp, self))\n else:\n raise SBMLError('Derivative found for unknown parameter: <'\n + var + '>.')\n node = dom_next(node, 'rateRule')",
"def __init__(self, rules):\n\n self.grammar = defaultdict(list)\n self.word_pos = dict()\n self.pos = set()\n\n for rule in rules:\n rule = rule.rstrip()\n if len(rule) > 0:\n rule = rule.split('->') # split start/end\n left = rule[0].strip()\n right = [(re.sub(r'[^a-zA-Z\\d\\s-]', '', r)).strip().split(' ') for r in rule[1].split('|')]\n self.grammar[left] += right\n\n # extract POS tags\n # pos iff on lhs of rhs without lhs\n # det -> that\n # that -> #\n for left, right in self.grammar.iteritems():\n for r in right:\n for r2 in r:\n if not self.grammar.has_key(r2):\n self.pos.add(left)",
"def convert_grammar(grammar):\n\n # Remove all the productions of the type A -> X B C or A -> B a.\n global RULE_DICT\n unit_productions, result = [], []\n res_append = result.append\n index = 0\n\n for rule in grammar:\n new_rules = []\n if len(rule) == 2 and rule[1][0] != \"'\":\n # Rule is in form A -> X, so back it up for later and continue with the next rule.\n unit_productions.append(rule)\n add_rule(rule)\n continue\n elif len(rule) > 2:\n # Rule is in form A -> X B C [...] or A -> X a.\n terminals = [(item, i) for i, item in enumerate(rule) if item[0] == \"'\"]\n if terminals:\n for item in terminals:\n # Create a new non terminal symbol and replace the terminal symbol with it.\n # The non terminal symbol derives the replaced terminal symbol.\n rule[item[1]] = f\"{rule[0]}{str(index)}\"\n new_rules += [f\"{rule[0]}{str(index)}\", item[0]]\n index += 1\n while len(rule) > 3:\n new_rules.append([f\"{rule[0]}{str(index)}\", rule[1], rule[2]])\n rule = [rule[0]] + [f\"{rule[0]}{str(index)}\"] + rule[3:]\n index += 1\n # Adds the modified or unmodified (in case of A -> x i.e.) rules.\n add_rule(rule)\n res_append(rule)\n if new_rules:\n result.extend(new_rules)\n # Handle the unit productions (A -> X)\n while unit_productions:\n rule = unit_productions.pop()\n if rule[1] in RULE_DICT:\n for item in RULE_DICT[rule[1]]:\n new_rule = [rule[0]] + item\n if len(new_rule) > 2 or new_rule[1][0] == \"'\":\n result.insert(0, new_rule)\n else:\n unit_productions.append(new_rule)\n add_rule(new_rule)\n return result",
"def parse_cst(self):\n stack = []\n self.tokenizer.next().must_be('{')\n for token in self.tokenizer:\n stack += [ token ] # Build a stack to process\n if token.text == \".\":\n # We've got a rule to process. Start by determining correct syntax.\n stack[1].must_be(':')\n ## Name analysis\n stack[0].assert_symbol_name()\n production_elements = stack[2:-1]\n for element in production_elements:\n element.assert_symbol_name()\n if stack[0].text in self.GlobalSymbolDict: # Redefined lexical sym or add a new production?\n existingSymbol = self.GlobalSymbolDict[stack[0].text]\n if existingSymbol.is_gla:\n raise Exception(\"Lexical Symbol %s redefined at %d,%d. Originally at %d,%d\" % \\\n (stack[0].text, stack[0].line, stack[0].col, \\\n existingSymbol.defining_token.line, existingSymbol.defining_token.col))\n existingSymbol.productions += [Production(existingSymbol,production_elements)]\n else: # Brand new symbol occurrence\n s = Symbol(stack[0])\n s.is_gla = False\n s.productions = [Production(s,production_elements)]\n self.GlobalSymbolDict[stack[0].text] = s\n stack = []\n elif token.text == \"{\":\n raise Exception(\"Unexpected %s\" % token)\n elif token.text == \"}\":\n if len(stack) > 1: raise Exception(\"Unfinished lexical specification beginning with %s\" % stack[0])\n #pp = pprint.PrettyPrinter()\n #pp.pprint(self.GlobalSymbolDict)\n return\n else: pass",
"def parse(self, line):\n # BEGIN_YOUR_CODE\n line = line.strip('\\n')\n orig_line = line\n\n line = line.split(' ')\n n = len(line)\n\n # initialize log_probs and backpointer\n log_probs = {}\n backpointer = {}\n for i in range(n):\n for j in range(i, n):\n log_probs[(i, j)] = {}\n backpointer[(i, j)] = {}\n for A in self.nonterminal:\n log_probs[(i, j)][A] = -float('inf')\n backpointer[(i, j)][A] = (0, [None])\n\n # fill terminal rules\n for i in range(n):\n for rule in self.from_rhs((line[i],)):\n A = rule.lhs\n new_prob = rule.log_prob\n if new_prob > log_probs[(i, i)][A]:\n log_probs[(i, i)][A] = new_prob\n backpointer[(i, i)][A] = (1, [rule])\n\n # main loop\n binary_filter = lambda rule: len(rule.rhs) == 2\n for l in range(1, n+1):\n for i in range(n-l+1):\n j = i+l-1\n for k in range(i, j):\n for rule in filter(binary_filter, self.rules):\n A = rule.lhs\n B = rule.rhs[0]\n C = rule.rhs[1]\n new_prob = rule.log_prob + log_probs[(i, k)][B] + log_probs[(k+1, j)][C]\n if new_prob > log_probs[(i, j)][A]:\n log_probs[(i, j)][A] = new_prob\n backpointer[(i, j)][A] = (k-i+1, [rule])\n\n found = True\n while found:\n found = False\n for A in log_probs[(i, j)].keys():\n for rule in self.from_rhs((A,)):\n B = rule.lhs\n new_prob = rule.log_prob + log_probs[(i, j)][A]\n if new_prob > log_probs[(i, j)][B]:\n log_probs[(i, j)][B] = new_prob\n backpointer[(i, j)][B] = (j-i+1, [rule])\n found = True\n\n self.print_parse_result(orig_line, log_probs, backpointer, n)\n # END_YOUR_CODE",
"def _create_rules(rules, node_rules, node_atrrs):\n for node_attr, node_value in node_atrrs.iteritems():\n if node_attr not in node_rules:\n continue\n for rule in node_rules[node_attr]:\n # if isinstance(rule['from'], REGEX_TYPE) and node_value.startswith('mediumtext'):\n if rule['from'] == node_value:\n rules[node_attr] = rule['to']",
"def parse(self, inp):\n\n tokens = self.tokenizer.tokenize(inp)\n tokens_left = len(tokens)\n\n # print(tokens)\n\n while tokens_left:\n\n for rule in self.grammar:\n tokens = tokens[rule.match(tokens):]\n\n if len(tokens) < tokens_left:\n tokens_left = len(tokens)\n else:\n # nothing is matching any more - stop\n break\n\n return len(tokens) == 0, tokens",
"def readrules(self, fomalines):\n for lineno, l in enumerate(fomalines):\n if 'define' in l or 'def ' in l:\n rulecom = l.split(' #')\n r = re.findall(\"(defi?n?e?)\\s+(\\S+)\\s+([^;]+)\", rulecom[0])\n if len(r[0]) != 3:\n print \"Syntax error on line %i\" % lineno\n (_, rulename, rule) = r[0]\n if len(rulecom) > 1:\n commentline = rulecom[1].strip()\n else:\n commentline = ''\n self.rule_add(rulename, rule, commentline)\n if 'chain' in l:\n l = l.replace(';','')\n chain = re.findall('chain\\s+(.*)', l)\n rc = chain[0].replace(' ','').split(',')\n self.rc = rc",
"def parse_input(lines):\n # rules structure:\n # a tuple of (before, after)\n # where before is the smaller square ([[1,1],[1,0])\n # and after is the bigger square: ([1,1,1],[1,0,1],[0,1,1])\n rules = {2: [], 3: []}\n binary_map = {'.': False, '#': True}\n ruleset = set()\n for line in lines:\n if not line.strip():\n continue\n before, after = line.strip().split(' => ')\n smaller = []\n bigger = []\n for row in before.strip().split('/'):\n smaller.append([binary_map[n] for n in row])\n for row in after.strip().split('/'):\n bigger.append([binary_map[n] for n in row])\n for _ in range(2):\n for rotation in range(4):\n # add all four rotations in:\n rot = np.rot90(smaller, rotation)\n strrot = ','.join(str(item) for row in rot for item in row)\n if strrot not in ruleset:\n rules[len(rot[0])].append(Rule(np.array(rot), np.array(bigger)))\n ruleset.add(strrot)\n\n smaller = np.flipud(smaller)\n\n return rules",
"def parse_logic(logic):\n\n ###print \"parse_logic(logic): logic:\",logic\n\n tokens = logic.split()\n\n # begin recursive logic parse\n return grammar_0(tokens)",
"def create_lexicon(lexicon_save_filepath):\r\n if not os.path.isfile(lexicon_save_filepath):\r\n download_file(LEXICON_URL, lexicon_save_filepath)\r\n\r\n # Iterate lexicon file and add the first pronunciation in the file for\r\n # each word to our lexicon dictionary\r\n lexicon = MISSING_LEXICON\r\n delayed_words = {}\r\n for line in open(lexicon_save_filepath):\r\n line = line.split()\r\n phns = \" \".join(p.strip(\"012\") for p in line[1:])\r\n\r\n # Don't add words with punctuation until we can be sure they won't\r\n # overwrite words without punctuation.\r\n clean_word = remove_punctuation(line[0])\r\n if clean_word != line[0] and clean_word not in delayed_words:\r\n delayed_words[clean_word] = phns\r\n elif clean_word == line[0] and clean_word not in lexicon:\r\n lexicon[clean_word] = phns\r\n\r\n # Add words with punctuation if they won't overwrite non-punctuated words\r\n for word, phns in delayed_words.items():\r\n if word not in lexicon:\r\n lexicon[word] = phns\r\n\r\n return lexicon",
"def parse_input(self, instructions):\r\n\r\n input_ = instructions\r\n input_list = input_.strip().split()\r\n\r\n if input_list[0] == 'push':\r\n self.push(input_list[1])\r\n\r\n elif input_list[0] == 'pop':\r\n self.pop()\r\n\r\n elif input_list[0] == 'top':\r\n self.top()\r\n\r\n elif input_list[0] == 'replace':\r\n self.replace(input_list[1], input_list[2])\r\n\r\n else:\r\n pass",
"def convert_symbol_to_raw_actions(self, symbol, rules):\n assert not isinstance(symbol, list)\n assert isinstance(symbol, str) or isinstance(symbol, int)\n symbol = [symbol]\n finished = False\n while not finished:\n new_symbol = []\n for symbol_val in symbol:\n if symbol_val in rules.keys():\n new_symbol.append(rules[symbol_val][0])\n new_symbol.append(rules[symbol_val][1])\n else:\n new_symbol.append(symbol_val)\n if new_symbol == symbol: finished = True\n else: symbol = new_symbol\n new_symbol = tuple(new_symbol)\n return new_symbol",
"def parse_sentiwordnet(lexicon_file):\n lex = dict()\n with open(lexicon_file, encoding='utf8') as f:\n for line in f:\n if line.startswith('#'):\n # comment\n continue\n fields = line.strip().split('\\t')\n if len(fields) < 6:\n # last line\n continue\n # postag id score_pos score_neg word#sense word2#sense def\n pos = float(fields[2])\n neg = float(fields[3])\n for word in fields[4].split():\n word = word.split('#')[0]\n try:\n prev_pos, prev_neg, count = lex[word]\n lex[word] = (prev_pos + pos, prev_neg + neg, count + 1)\n except KeyError:\n lex[word] = (pos, neg, 1)\n\n for word in lex:\n pos, neg, count = lex[word]\n lex[word] = (pos / count, neg / count)\n\n return lex"
] |
[
"0.56139296",
"0.5591229",
"0.5527879",
"0.54040474",
"0.5386398",
"0.52106386",
"0.51804394",
"0.5177677",
"0.51449215",
"0.51025224",
"0.504116",
"0.49598598",
"0.49496937",
"0.49448723",
"0.4906003",
"0.4897008",
"0.4895634",
"0.48914123",
"0.48295358",
"0.48112032",
"0.47720063",
"0.47692117",
"0.4752177",
"0.47138816",
"0.47103772",
"0.46957883",
"0.46953756",
"0.4685348",
"0.4675711",
"0.46591872"
] |
0.64983183
|
0
|
Parses morphotactics model into valid rewrite rules.
|
def _get_morphotactics_rules(morphotactics_dir: str) -> _RewriteRuleSet:
  def _read_rule_set(path: str) -> _RewriteRule:
    logging.info(f"reading rewrite rules from '{path}'")
    # Below read call might throw IOError.
    lines = morphotactics_reader.read_rule_definitions(path)
    for index, line in lines.items():
      try:
        morphotactics_validator.validate(line)
      except morphotactics_validator.InvalidMorphotacticsRuleError as error:
        raise MorphotacticsCompilerError(
            f"Rewrite rule at line {index} of '{path}' is illformed. {error}")
    return morphotactics_parser.parse(list(lines.values()))

  paths = sorted(glob.glob(f"{morphotactics_dir}/*.txt"))
  rule_sets = [_read_rule_set(p) for p in paths]
  morphotactics = _RewriteRuleSet()
  morphotactics.rule.extend(r for rs in rule_sets for r in rs.rule)
  if not morphotactics.rule:
    raise MorphotacticsCompilerError(
        "no valid morphotactics rewrite rules found.")
  return morphotactics
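
# Usage sketch (illustrative only; the directory path below is an assumption,
# not taken from the original sources):
#
#   morphotactics = _get_morphotactics_rules("path/to/morphotactics_txt_dir")
#   print(len(morphotactics.rule))  # number of rewrite rules parsed from all *.txt files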
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_from_morph_rules(\n self, morph_rules: Dict[str, Dict[str, Dict[Union[int, str], Union[int, str]]]]\n ) -> None:\n for tag in morph_rules:\n for word in morph_rules[tag]:\n pattern = [{\"ORTH\": word, \"TAG\": tag}]\n attrs = morph_rules[tag][word]\n attrs, morph_attrs = _split_morph_attrs(attrs)\n if \"MORPH\" in attrs:\n morph = self.vocab.morphology.add(attrs[\"MORPH\"])\n attrs[\"MORPH\"] = self.vocab.strings[morph]\n elif morph_attrs:\n morph = self.vocab.morphology.add(morph_attrs)\n attrs[\"MORPH\"] = self.vocab.strings[morph]\n self.add([pattern], attrs) # type: ignore[list-item]",
"def _parse_rules(self, model, comp, node):\n parent = node\n formulas = {}\n # Create variables with assignment rules (all except derivatives)\n node = dom_child(parent, 'assignmentRule')\n while node:\n var = self._convert_name(str(node.getAttribute('variable')).strip())\n if var in comp:\n self.log('Parsing assignment rule for <' + str(var) + '>.')\n var = comp[var]\n var.set_rhs(parse_mathml_rhs(\n dom_child(node, 'math'), comp, self))\n else:\n raise SBMLError('Assignment found for unknown parameter: \"'\n + var + '\".')\n node = dom_next(node, 'assignmentRule')\n # Create variables with rate rules (states)\n node = dom_child(parent, 'rateRule')\n while node:\n var = self._convert_name(str(node.getAttribute('variable')).strip())\n if var in comp:\n self.log('Parsing rate rule for <' + var + '>.')\n var = comp[var]\n ini = var.rhs()\n ini = ini.eval() if ini else 0\n var.promote(ini)\n var.set_rhs(parse_mathml_rhs(\n dom_child(node, 'math'), comp, self))\n else:\n raise SBMLError('Derivative found for unknown parameter: <'\n + var + '>.')\n node = dom_next(node, 'rateRule')",
"def urdu_morph_analyze(word,urdu_morfessor_model,urdu_script_check_re):\n\n def urdu_morphanalysis_needed(word):\n return urdu_script_check_re.match(word) \n\n m_list=[]\n if urdu_morphanalysis_needed(word): \n val=urdu_morfessor_model.viterbi_segment(word)\n m_list=val[0]\n else:\n m_list=[word]\n return m_list",
"def parse_logic(logic):\n\n ###print \"parse_logic(logic): logic:\",logic\n\n tokens = logic.split()\n\n # begin recursive logic parse\n return grammar_0(tokens)",
"def morpho_doc(doc):\n doc_text = doc.stripped\n mystem_analyzer.start()\n # new_morpho = mystem_analyzer.analyze(doc_text)\n new_morpho = mystem_analyzer.analyze(doc_text.replace('\\n',''))\n\n morpho_list = []\n\n for element in new_morpho: # разрезаем\n\n if is_sentence_end(element):\n morpho_list.append(element)\n else:\n\n line = element.get('text', '')\n\n space_len = 0\n\n word_start = -1\n word_len = 0\n\n symbol_number = -1\n for symbol in line:\n\n symbol_number+=1\n\n if symbol == \"'\" or symbol == '\"' or symbol == '»' or symbol == '«':\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n # добавим кавычку\n new_element = {'text': symbol}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n elif symbol == \" \":\n\n if word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n space_len += 1\n\n else:\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n if word_start == -1:\n word_start = symbol_number\n word_len = 1\n else:\n word_len += 1\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n\n morpho_list.append(new_element)\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n for i in range(len(morpho_list) - 1): # переставляем\n if i > 0:\n if morpho_list[i - 1]['text'] == ' ' and morpho_list[i]['text'] == '\"' and morpho_list[i + 1]['text'] == '\\\\s':\n morpho_list[i], morpho_list[i + 1] = morpho_list[i + 1], morpho_list[i]\n\n sentence_index = 0\n word_index = 0\n start_offset = 0\n\n for element in morpho_list: # нумеруем\n if is_sentence_end(element):\n if word_index != 0:\n sentence_index += 1\n word_index = 0\n else:\n line = element.get('text', '')\n line_len = len(line)\n\n if(line[0]!=' '):\n element['start_offset'] = start_offset\n element['end_offset'] = start_offset + line_len - 1\n element['word_index'] = word_index\n element['sentence_index'] = sentence_index\n\n word_index += 1\n start_offset += line_len\n\n doc.morpho = morpho_list\n mystem_analyzer.close()",
"def _parse_ml(self, line):\n # Parse the line\n fields = line.split('\\\\')\n if self.lang == ENGLISH:\n # pylint: disable=C0301\n # English sample:\n # 14\\abandonment\\94\\C\\\\1\\N\\N\\N\\N\\Y\\abandon+ment\\2x\\SA\\N\\N\\N\\#\\N\\N\\SA\\((abandon)[V],(ment)[N|V.])[N]\\N\\N\\N\n # From the README:\n # The eml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Cob\n # 4. MorphStatus\n # 5. Lang\n # 6. MorphCnt\n # 7. NVAffComp\n # 8. Der\n # 9. Comp\n # 10. DerComp\n # 11. Def\n # 12. Imm\n # 13. ImmSubCat\n # 14. ImmSA\n # 15. ImmAllo\n # 16. ImmSubst\n # 17. ImmOpac\n # 18. TransDer\n # 19. ImmInfix\n # 20. ImmRevers\n # 21 FlatSA\n # 22. StrucLab\n # 23. StrucAllo\n # 24. StrucSubst\n # 25. StrucOpac\n lemma = fields[0]\n word = fields[1]\n derivation = fields[21]\n elif self.lang == DUTCH:\n # pylint: disable=C0301\n # Dutch sample:\n # 19\\aalbessengelei\\7\\C\\1\\Y\\Y\\Y\\aalbes+en+gelei\\NxN\\N\\N\\(((aal)[N],(bes)[N])[N],(en)[N|N.N],(gelei)[N])[N]\\N\\N\\N\n # The dml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Inl\n # 4. MorphStatus\n # 5. MorphCnt\n # 6. DerComp\n # 7. Comp\n # 8. Def\n # 9. Imm\n # 10. ImmSubCat\n # 11. ImmAllo\n # 12. ImmSubst\n # 13. StrucLab\n # 14. StruAcAllo\n # 15. StrucSubst\n # 16. Sepa\n lemma = fields[0]\n word = fields[1]\n derivation = fields[12]\n\n # Skip multi-word entries for roots\n roots = self._get_root(derivation) if \" \" not in word else None\n return (lemma, word, roots)",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def _apply_to(self, model, **kwds):\n config = self.config = self.CONFIG(kwds.pop('options', {}))\n config.set_value(kwds)\n\n self._transformContainer(model)\n\n # Reclassify all disjuncts\n for disjunct_object in model.component_objects(Disjunct,\n descend_into=(Block,\n Disjunct)):\n disjunct_object.parent_block().reclassify_component_type(\n disjunct_object, Block)\n\n # Transform any remaining logical stuff\n TransformationFactory('core.logical_to_linear').apply_to(model)",
"def fix_morphs():\n morph_links = load_morph_links()\n create_morphs_node(morph_links)\n create_custom_template(morph_links)\n clean_morphs()",
"def vrules(self):\n ...",
"def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))",
"def clean_morphs():\n blendshapes = cmds.ls(type=\"blendShape\")\n for blendShape in blendshapes:\n blend_target_list = cmds.listAttr(blendShape + '.w', m=True)\n\n for blend_target in blend_target_list:\n bs_fixed = blend_target.replace(\"head__eCTRL\", \"\")\n if (bs_fixed.find(\"__\") > 1):\n bs_split = bs_fixed.split(\"__\")\n bs_fixed = bs_fixed.replace(bs_split[0]+\"__\", \"\")\n bs_fixed = bs_fixed.replace(\"headInner__\", \"\")\n bs_fixed = bs_fixed.replace(\"head_eCTRL\", \"\")\n bs_fixed = bs_fixed.replace(\"head__\", \"\")\n bs_fixed = bs_fixed.replace(\"head_\", \"\")\n bs_fixed = bs_fixed.replace(\"PHM\", \"\")\n bs_fixed = bs_fixed.replace(\"CTRL\", \"\")\n bs_fixed = bs_fixed.replace(\"QT1\", \"\")\n bs_fixed = bs_fixed.replace(\"Shape\", \"\")\n\n oldMorph = blendShape + \".\" + blend_target\n try:\n # Rename Morphs (Blendshapes)\n cmds.aliasAttr(bs_fixed, oldMorph)\n except:\n pass",
"async def parse_regex(opsdroid, skills, message):\n matched_skills = []\n for skill in skills:\n for matcher in skill.matchers:\n if \"regex\" in matcher:\n opts = matcher[\"regex\"]\n matched_regex = await match_regex(message.text, opts)\n if matched_regex:\n message.regex = matched_regex\n for regroup, value in matched_regex.groupdict().items():\n message.update_entity(regroup, value, None)\n matched_skills.append(\n {\n \"score\": await calculate_score(\n opts[\"expression\"], opts[\"score_factor\"]\n ),\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills",
"def _parse_modelspace(self) :\n\t\tlogging.debug(\"Parsing modelspace hard constraints\")\t\n\t\n\t\tself.modelspace = {}\n\t\t\n\t\tfor varname in ['alpha','beta','g','h'] : \n\t\t\tself._parse_var_modelspace(varname)",
"def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)",
"def canonicalize(self):\n self.rules = canonicalize_grammar(self,self.empty)\n self.is_canonical = True",
"def parse_rules(content=None):\r\n rules = content.split(DELIMITER)\r\n parsed_rules = list()\r\n order = 1\r\n for rule in rules:\r\n if rule.strip() == '':\r\n continue\r\n parsed_rule = {}\r\n lines = rule.split(\"\\n\")\r\n parsed_rule['orderValue'] = order\r\n order += 1\r\n for line in lines:\r\n if line.strip() == '':\r\n continue\r\n key_value = line.strip().split(':')\r\n key = key_value[0].strip()\r\n value = key_value[1].strip()\r\n if key == 'action':\r\n parsed_rule['action'] = value\r\n elif key == 'protocol':\r\n parsed_rule['protocol'] = value\r\n elif key == 'source_ip_address':\r\n parsed_rule['sourceIpAddress'] = value\r\n elif key == 'source_ip_subnet_mask':\r\n parsed_rule['sourceIpSubnetMask'] = value\r\n elif key == 'destination_ip_address':\r\n parsed_rule['destinationIpAddress'] = value\r\n elif key == 'destination_ip_subnet_mask':\r\n parsed_rule['destinationIpSubnetMask'] = value\r\n elif key == 'destination_port_range_start':\r\n parsed_rule['destinationPortRangeStart'] = int(value)\r\n elif key == 'destination_port_range_end':\r\n parsed_rule['destinationPortRangeEnd'] = int(value)\r\n elif key == 'version':\r\n parsed_rule['version'] = int(value)\r\n parsed_rules.append(parsed_rule)\r\n return parsed_rules",
"def preprocess(self, data, vocab, opt):\n processed = []\n rule_counts = defaultdict(int)\n with open(self.mappings) as f:\n mappings = f.readlines()\n with open('tacred/rules.json') as f:\n rules = json.load(f)\n for c, d in enumerate(data):\n tokens = d['token']\n if opt['lower']:\n tokens = [t.lower() for t in tokens]\n l = len(tokens)\n # anonymize tokens\n ss, se = d['subj_start'], d['subj_end']\n os, oe = d['obj_start'], d['obj_end']\n tokens[ss:se+1] = ['SUBJ-'+d['subj_type']] * (se-ss+1)\n tokens[os:oe+1] = ['OBJ-'+d['obj_type']] * (oe-os+1)\n tokens = map_to_ids(tokens, vocab.word2id)\n pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID)\n ner = map_to_ids(d['stanford_ner'], constant.NER_TO_ID)\n if self.opt['gat']:\n deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID)\n else:\n deprel = map_to_ids([d for d in d['stanford_deprel'] if d!='ROOT' and d!='root'], constant.DEPREL_TO_ID)\n \n if opt['prune_k'] < 0:\n edge_index = [[h-1 for h in d['stanford_head'] if h != 0], \n [i for i, h in enumerate(d['stanford_head']) if h != 0]]\n else:\n edge_index = prune_tree(l, d['stanford_head'], opt['prune_k'], list(range(ss, se+1)), list(range(os, oe+1)))\n deprel = map_to_ids([d['stanford_deprel'][i] for i in edge_index[1]], constant.DEPREL_TO_ID)\n if deprel[-1] == 2:\n deprel = deprel[:-1]\n edge_index = [edge_index[0][:-1], edge_index[1][:-1]]\n edge_index = [edge_index[0]+edge_index[1], edge_index[1]+edge_index[0]]\n edge_mask = [1 if i in edge_index[1] else 0 for i in range(l)]\n relation = constant.LABEL_TO_ID[d['relation']]\n\n if opt['pattn']:\n subj_positions = get_positions(d['subj_start'], d['subj_end'], l)\n obj_positions = get_positions(d['obj_start'], d['obj_end'], l)\n if 't_' in mappings[c] or 's_' in mappings[c]:\n rule = helper.word_tokenize(rules[eval(mappings[c])[0][1]])\n rule = map_to_ids(rule, vocab.rule2id) \n rule = [constant.SOS_ID] + rule + [constant.EOS_ID]\n processed += [(tokens, pos, ner, deprel, subj_positions, obj_positions, relation, edge_index, rule)]\n else:\n processed += [(tokens, pos, ner, deprel, subj_positions, obj_positions, relation, edge_index, [])]\n else:\n subj_mask = [1 if (i in range(ss, se+1) and i in edge_index[0]+edge_index[1]) else 0 for i in range(len(tokens))]\n obj_mask = [1 if (i in range(os, oe+1) and i in edge_index[0]+edge_index[1]) else 0 for i in range(len(tokens))]\n \n if 't_' in mappings[c] or 's_' in mappings[c]:\n rule_counts[rules[eval(mappings[c])[0][1]]] += 1\n rule = helper.word_tokenize(rules[eval(mappings[c])[0][1]])\n rule = map_to_ids(rule, vocab.rule2id) \n rule = [constant.SOS_ID] + rule + [constant.EOS_ID]\n # processed_rule += [(tokens, pos, ner, deprel, subj_mask, obj_mask, relation, edge_index, edge_mask, rule)]\n else:\n rule = []\n processed += [(tokens, pos, ner, deprel, subj_mask, obj_mask, relation, edge_index, edge_mask, rule)]\n return processed",
"def validateRaxmlModels(self, input): \n filename=input+\".mg.modelFromMG.txt\"\n arq = open(filename, \"r\")\n output = \"\"\n conteudo = arq.readlines()\n seqcont = 0\n output = \"True\"\n modelogerado = \"\"\n listamodelos = ['DAYHOFF', 'DCMUT', 'JTT', 'MTREV', 'WAG', 'RTREV', 'CPREV',\n 'VT', 'BLOSUM62', 'MTMAM', 'MTART', 'MTZOA', 'LG', 'PMB',\n 'HIVB', 'HIVW', 'JTTDCMUT', 'FLU','GTR']\n\n if len(conteudo) == 0 or len(conteudo) > 1:\n output = \"False\"\n else:\n modelogerado = conteudo[0].strip()\n if modelogerado.upper() not in listamodelos:\n print \"Teste\"\n output = \"False\" \n \n return {\"MG_MODEL\":modelogerado, \"MG_MODEL_VALID\":output}",
"def object_script_to_model(path):\n\n\tobject_model = {\"name\": None,\n\t\t\t\t\t\"vision_model\": None,\n\t\t\t\t\t\"is_ignore_orientation\": None,\n\t\t\t\t\t\"teaching_position\": None,\n\t\t\t\t\t\"new_snapshot_pos\": None,\n\t\t\t\t\t\"new_snapshot_pos_inv\": None,\n\t\t\t\t\t\"current_speed_0_thr\": None,\n\t\t\t\t\t\"current_force_0_thr\": None,\n\t\t\t\t\t\"move_approach\": None,\n\t\t\t\t\t\"move_pick\": None,\n\t\t\t\t\t\"move_retract\": None}\n\n\t_name = re.search(r'(?:\\\\|\\/)(.*)\\.script', path)\n\tobject_model[\"name\"] = \"object\" if not _name else _name.group(1)\n\n\t# re patterns\n\t_re_6_fl_list = r'\\[(?:-?\\d*\\.?\\d*E?-?\\d+,? ?){6}\\]' \t# re for list of 6 signed float\n\t_re_vision_model = r'(?m)^\\s*f = xmlrpc_server\\.findmodel\\(\\\"(\\{[\\w-]+\\})\", tool\\[0\\], tool\\[1\\], tool\\[2\\], tool\\[3\\], tool\\[4\\], tool\\[5\\]\\)'\n\t_re_is_ignore_orientation = r'(?m)^\\s*is_ignore_orientation = ((?:True)|(?:False))'\n\t_re_teaching_pos = r'(?m)^\\s*object_teaching_location = p({})'.format(_re_6_fl_list)\n\t_re_new_snapshot_pos = r'(?m)^\\s*snapshot_pos = pose_trans\\(object_location, pose_trans\\(pose_inv\\(p({})\\), p({})\\)\\)'.format(_re_6_fl_list, _re_6_fl_list)\n\t_re_current_speed_0_thr = r'(?m)^\\s*if \\(current_speed\\[0\\] != (-?\\d*\\.?\\d*)\\):'\n\t_re_current_force_0_thr = r'(?m)^\\s*if \\(current_force\\[0\\] != (-?\\d*\\.?\\d*)\\):'\n\n\t_re_move_approach = r'(?m)^\\s*\\$ \\d+ \"Rel_approach\"\\s+movel\\(pose_trans\\(snapshot_pos, p({})\\), a=(?:-?\\d*\\.?\\d*), v=(?:-?\\d*\\.?\\d*)\\)'.format(_re_6_fl_list)\n\t_re_move_pick = r'(?m)^\\s*\\$ \\d+ \"Rel_pick\"\\s+movel\\(pose_trans\\(snapshot_pos, p({})\\), a=(?:-?\\d*\\.?\\d*), v=(?:-?\\d*\\.?\\d*)\\)'.format(_re_6_fl_list)\n\t_re_move_retract = r'(?m)^\\s*\\$ \\d+ \"Rel_retract\"\\s+movel\\(pose_trans\\(snapshot_pos, p({})\\), a=(?:-?\\d*\\.?\\d*), v=(?:-?\\d*\\.?\\d*)\\)'.format(_re_6_fl_list)\n\n\n\twith open(path, \"r\") as file:\n\t\tcontent = file.read()\n\t\tcamera_locate_match = re.search(r'\\s*\\$ \\d+ \"Camera Locate\"', content)\n\t\tcontent = content[camera_locate_match.start():]\n\t\t\n\t\tobject_model[\"vision_model\"] = _reg_catch(_re_vision_model, content)\n\t\tobject_model[\"is_ignore_orientation\"] = _reg_catch(_re_is_ignore_orientation, content)\n\t\tobject_model[\"teaching_position\"] = _reg_catch(_re_teaching_pos, content)\n\t\tobject_model[\"new_snapshot_pos_inv\"] = _reg_catch(_re_new_snapshot_pos, content)\n\t\tobject_model[\"new_snapshot_pos\"] = _reg_catch(_re_new_snapshot_pos, content, 2)\n\t\tobject_model[\"current_speed_0_thr\"] = _reg_catch(_re_current_speed_0_thr, content)\n\t\tobject_model[\"current_force_0_thr\"] = _reg_catch(_re_current_force_0_thr, content)\n\t\tobject_model[\"move_approach\"] = _reg_catch(_re_move_approach, content)\n\t\tobject_model[\"move_pick\"] = _reg_catch(_re_move_pick, content)\n\t\tobject_model[\"move_retract\"] = _reg_catch(_re_move_retract, content)\n\treturn object_model",
"def read_file(self, file_src):\n with open(file_src, \"r\") as fobj:\n grammar = Grammar()\n settings = Settings()\n for line in fobj:\n rhs = None #right-hand-side of a rule\n lhs = None #left-hand-side of a rule\n state = \"lhs\"\n words = line.rstrip().split()\n for word in words:\n if (words.index(word) == 0 and word == \"axiom:\"):\n words.remove(word)\n grammar.axiom = ' '.join(words)\n elif (words.index(word) > 0 and words[0] == \"angle_z:\"):\n settings.angle_z_min = int(words[1])\n settings.angle_z_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"angle_y:\"):\n settings.angle_y_min = int(words[1])\n settings.angle_y_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"angle_x:\"):\n settings.angle_x_min = int(words[1])\n settings.angle_x_max = int(words[3])\n elif (words.index(word) > 0 and words[0] == \"branch-shortening:\"):\n settings.branch_min = float(words[1])\n settings.branch_max = float(words[3])\n #elif (words.index(word) > 0 and words[0] == \"num_sides:\"):\n #grammar.num_sides = int(words[1])\n elif (words.index(word) > 0 and words[0] == \"base_radius:\"):\n settings.base_radius = float(words[1])\n elif (words.index(word) > 0 and words[0] == \"rules:\"):\n if(state == \"lhs\"):\n lhs = word\n if(lhs not in grammar.variables):\n grammar.variables.add(lhs)\n state = \"rhs\"\n continue\n if(state == \"rhs\" and word != \"->\"):\n rhs = word\n if(\",\" in rhs):\n rhs = rhs.replace(\",\", \"\")\n grammar.rules.add(Rule(lhs,rhs))\n state = \"lhs\"\n elif (words.index(word) > 0 and words[0] == \"generations:\"):\n settings.generations = int(words[1])\n elif (words.index(word) > 0 and words[0] == \"base_length:\"):\n settings.base_length = float(words[1])\n elif (words.index(word) > 0 and words[0] == \"bark_texture:\"):\n settings.bark_path = words[1]\n elif (words.index(word) > 0 and words[0] == \"leaf_texture:\"):\n settings.leaf_path = words[1]\n return [grammar, settings]",
"def build_model(self):\n insts1, attrs1, rels1 = self.arg1.get_triples()\n insts2, attrs2, rels2 = self.arg2.get_triples()\n for items, shld_norm in [(insts1, True), (insts2, True), (attrs1, True),\n (attrs2, True), (rels1, False), (rels2, False)]:\n for i in range(len(items)):\n # GUROBI cant handle Unicode so step down to ASCII\n items[i] = [items[i][0].encode('ascii', 'ignore').lower(),\n items[i][1].encode('ascii', 'ignore'),\n items[i][2].encode('ascii', 'ignore')]\n # normalize concept names -- instances and attributes\n if shld_norm:\n items[i][2] = SmatchILP.normalize(items[i][2])\n\n # Attributes are same as relations\n rels1.extend(attrs1)\n rels2.extend(attrs2)\n\n log.debug(\"AMR 1 Instances:\\n %s\" % insts1)\n log.debug(\"AMR 1 Relations:\\n %s\" % rels1)\n log.debug(\"AMR 2 Instances:\\n %s\" % insts2)\n log.debug(\"AMR 2 Relations:\\n %s\" % rels2)\n\n for index, items in [(self.arg1vars, insts1), (self.arg2vars, insts2)]:\n for name, var, concept in items:\n assert name == 'instance' # relation name is instance ==> variable definition\n assert var not in index # variable name is unique\n index[var] = concept\n\n var_choices = set() # possible variable matches\n for v1 in self.arg1vars.keys():\n for v2 in self.arg2vars.keys():\n var_choices.add((v1, v2))\n\n # instances are relations too\n rels1.extend(insts1)\n rels2.extend(insts2)\n\n self.arg1size = len(rels1)\n self.arg2size = len(rels2)\n\n trpl_choices = set()\n trpl_var_consts = {}\n for name1, var11, var12 in rels1:\n id1 = \"%s:%s:%s\" % (name1, var11, var12)\n for name2, var21, var22 in rels2:\n possible = 0\n id2 = \"%s:%s:%s\" % (name2, var21, var22)\n # triple name matches && first argument to triples can be matched\n if name1 == name2 and (var11, var21) in var_choices:\n # second argument to triple can also be matched OR\n possible += 1\n if (var12, var22) in var_choices or (\n # they are the same concepts\n # var12 not in self.arg1vars and var22 not in self.arg2vars and\n var12 == var22):\n possible += 1\n trpl_choices.add((id1, id2))\n # constrains between variables and triples\n trpl_var_consts[id1, id2] = [(var11, var21)]\n # if second argument is also variable\n\n if (var12, var22) in var_choices:\n trpl_var_consts[id1, id2].append((var12, var22))\n log.debug('\\t %s <--> %s ? 
%s ' % (id1, id2, possible))\n\n # Add variables to ILP model\n model = GRBModel('Smatch ILP')\n if log.getLogger().getEffectiveLevel() >= log.INFO:\n model.Params.OutputFlag = 0 # disable output\n log.info(\"Number of possible variable matches %s\" % len(var_choices))\n log.info(\"Number of possible triple matches %s\" % len(trpl_choices))\n\n self.vars = model.addVars(var_choices, vtype=GRB.BINARY, name=\"v\")\n self.trpls = model.addVars(trpl_choices, vtype=GRB.BINARY, name=\"t\")\n\n # constraints\n for v1 in self.arg1vars:\n model.addConstr(self.vars.sum(v1, '*') <= 1, name='to max 1 var')\n for v2 in self.arg2vars:\n model.addConstr(self.vars.sum('*', v2) <= 1, name='from max 1 var')\n\n for trpl_idx, var_idxs in trpl_var_consts.items():\n for var_idx in var_idxs:\n model.addConstr(self.trpls[trpl_idx] <= self.vars[var_idx], name=\"%s::%s\" % (trpl_idx, var_idx))\n\n # objective\n model.setObjective(self.trpls.sum(), GRB.MAXIMIZE)\n self.model = model\n\n # stats for how big the problem is\n var_trpl_consts_count = sum(len(x) for x in trpl_var_consts.values())\n num_constr = len(var_choices) + len(trpl_choices) + var_trpl_consts_count\n num_vars = len(var_choices) + len(trpl_choices)\n log.info(\"ILP SIZE: %d binary variables (%d vars + %d triple vars)\" % (num_vars, len(var_choices), len(trpl_choices)))\n log.info(\"ILP SIZE: %d constraints (%d b/w arg vars and triples)\" % (num_constr, var_trpl_consts_count))",
"def parse(cls, model_path: str, **kwargs):",
"def parse_model_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'model' in f:\n MODEL_FILES.append(f)\n PY_FILES.remove(f)",
"def apply_rules(self, token_parse_list):\r\n return token_parse_list",
"def make_control_knowledge(self, horizon):\n\n \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n # ADD_RULE1_COUNT = 0\n # ADD_RULE2_COUNT = 0\n # ADD_RULE3_COUNT = 0\n\n close = list()\n far = list()\n\n for g in self.problem.goal:\n for p in self.problem.propositions:\n if re.match(r'at\\spackage\\d+\\scity\\d+-\\d+', str(p)):\n p_split = str(p).split()\n g_split = str(g).split()\n\n # if \"at\" and \"package[oo]\" match\n if p_split[0] == g_split[0] and p_split[1] == g_split[1]:\n # also \"city[oo]-[xx]\" match\n if p_split[2][:-2] == g_split[2][:-2]:\n close.append(p)\n else:\n far.append(p)\n\n # Rule 1:\n # ===============================\n # If a package is at its goal location, then it must remain there.\n # p@t and goal@t) -> p@t+1), where p is at(package, location)\n # cnf: not p@t or not goal@t or p@t+1\n\n for g in self.problem.goal:\n for t in range(0, horizon):\n clause = list()\n clause.append(-self.proposition_fluent_codes[(g, t)])\n clause.append(self.proposition_fluent_codes[(g, t + 1)])\n self.add_clause(clause, \"control\")\n # ADD_RULE1_COUNT += 1\n\n for t in range(0, horizon):\n for a in self.problem.actions:\n\n # Rule 2\n # ===============================\n\n # RULE\n # close -> do not load airplane\n # p1: close@t\n # p2: at the location of an airport @t\n # p3: airplane at this location @t\n # p4: plane is not loaded\n # a: load this airplane\n #\n # p1@t and p2@t and p3@t and p4@t => a@t\n # not p1@t or not p2@t or not p3@t or not p4@t or a@t\n # cnf: not p@t or not a@t\n if str(a).startswith('load-airplane'):\n for i in close:\n package = str(i).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(i, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE2_COUNT += 1\n\n # Rule 3\n # ===============================\n\n # RULE\n # far -> do not unload airplane\n # p@t -> not a@t, where p is far, a is unload-airplane\n # cnf: not p@t or not a@t\n if str(a).startswith('unload-airplane'):\n for j in far:\n package = str(j).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(j, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE3_COUNT += 1\n\n # # RULE\n # # if an airplane has a package on it and the package's\n # # destination is close do not fly this airplane.\n # # in fact, if the destination of package is far,\n # # fly this plane to it.\n # #\n # # p1: package on airplane @ t\n # # p2: package at a place @ t\n # # p3: the place and the goal are in the same city\n # # rule: p1@t and p2@t and p@3 => not fly plane@t\n # # and unload the plane@t\n #\n # # not p1@t or not p2@t or not fly@t\n # # not p1@t or not p2@t or unload\n #\n # # rule: p1@t and p2@t and not p3@t => fly plane@t and not\n # # unload the plane@t\n #\n # if str(a).startswith('fly-airplane'):\n # plane = str(a).split()[1]\n # # loc_from = str(a).split()[2]\n # for p1 in self.problem.propositions:\n # if str(p1).startswith('in package') and str(p1).split()[2] == plane: # in package plane\n # package = str(p1).split()[1]\n # for p2 in self.problem.propositions:\n # if p2 in close and str(p2).split()[1] == package: # at package location\n # clause = list()\n # clause.append(-self.proposition_fluent_codes[p1, t])\n # clause.append(-self.proposition_fluent_codes[p2, t])\n # clause.append(-self.action_fluent_codes[a, t])\n # self.add_clause(clause, 'control')\n # ADD_RULE2_COUNT += 
1\n #\n #\n # for g in self.problem.goal:\n # if str(g).split()[1] == package:\n # destination = str(g).split()[2]\n # for do in self.problem.actions:\n # # unload-airplane package00 plane00 city00-00\n # if str(do).startswith('unload') and str(do).split()[1] == package and str(do).split()[2] == plane and str(do).split()[3] == destination:\n # clause2 = list()\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p1, t])\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p2, t])\n # clause2.append(\n # self.action_fluent_codes[\n # do, t])\n # self.add_clause(clause2,\n # 'control')\n #\n # ADD_RULE3_COUNT += 1\n\n # RULE\n # if there is no package needs to be transferred at a location,\n # and the location has a truck\n # drive the truck to its airport\n\n # p1: (at package__ city__-__ /\\ (it is a goal)@t\n # p2: (at truck__ city__-__)@t\n # p3: (city__-__ is not airport)\n # not p1/\\p2/\\p3 => drive_truck_to_its_airport@t\n #\n #\n # CNF: p1 V not p2 V not p3 V drive_truck_to_its_airport@t\n # if str(a).startswith('DRIVE-TRUCK'):\n # for p1 in self.problem.goal:\n # city = str(p1).split()[2]\n # for p2 in self.problem.propositions:\n # if str(p2).startswith('at truck') and str(p2).split()[2] == city:\n # for p3 in self.problem.propositions:\n # if str(p3).startswith('airport') and str(p3).split()[1] == city:\n # clause = list()\n # clause.append(self.proposition_fluent_codes[(p1, t)])\n # clause.append(-self.proposition_fluent_codes[(p2, t)])\n # clause.append(-self.proposition_fluent_codes[(p3, t)])\n # clause.append(self.action_fluent_codes[(a, t)])\n # self.add_clause(clause, \"control\")\n\n # RULE\n # if there is an airplane is loaded with a package need\n # transfer (to another city), fly airplane to the corresponding\n # city.\n\n # p1: (at airplane__ city__-__)@t\n # p2: (in package__ airplane__)@t\n # p3: ( p2 is in far)\n # p1/\\p2/\\p3 => fly_airplane_to_its_airport@t\n #\n #\n # CNF: not p1@t V not p2@t V not p3@t V fly_plane_to_airport@t\n\n # print(\"ADDED RULE 1:\")\n # print(ADD_RULE1_COUNT)\n #\n # print(\"ADDED RULE 2:\")\n # print(ADD_RULE2_COUNT)\n #\n # print(\"ADDED RULE 3:\")\n # print(ADD_RULE3_COUNT)",
"def applyModifiers(self):\n if not self.getScopeUpdated():\n self.updateScopes()\n targets = self.getConTextModeNodes(\"target\")\n modifiers = self.getConTextModeNodes(\"modifier\")\n for target in targets:\n for modifier in modifiers:\n if modifier.applyRule(target):\n if self.getVerbose():\n print(\"applying relationship between\", modifier, target)\n\n self.add_edge(modifier, target)",
"def process(self,rules):\n for rule in rules:\n r,arg = rule.split('(')\n args = arg[:-1].split(',')\n self.commands.append((r,args))",
"def recog_v2(args):\n logging.warning(\"experimental API for custom LMs is selected by --api v2\")\n if args.batchsize > 1:\n raise NotImplementedError(\"multi-utt batch decoding is not implemented\")\n if args.streaming_mode is not None:\n raise NotImplementedError(\"streaming mode is not implemented\")\n if args.word_rnnlm:\n raise NotImplementedError(\"word LM is not implemented\")\n\n set_deterministic_pytorch(args)\n model, train_args = load_trained_model(args.model)\n\n # add lang2ph to the model\n if args.mask_phoneme:\n logging.warning(f'mask phoneme and create lang2ph for model')\n assert args.lang2ph is not None\n with open(args.lang2ph, 'r') as f:\n model.lang2ph = json.load(f)\n\n model.lang2phid = {}\n for lang, phones in model.lang2ph.items(): \n phoneset = set(phones + ['<blank>', '<unk>', '<space>', '<eos>'])\n phoneset = phoneset.intersection(model.args.char_list)\n model.lang2phid[lang] = list(map(model.args.char_list.index, phoneset))\n # model.lang2phid[lang] = list(map(model.args.char_list.index, phones+['<blank>', '<unk>', '<space>', '<eos>']))\n \n model.ctc.lang2phid = model.lang2phid\n logging.warning(f'model lang2phid {model.lang2phid}')\n\n assert isinstance(model, ASRInterface)\n model.eval()\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=False,\n sort_in_input_length=False,\n preprocess_conf=train_args.preprocess_conf\n if args.preprocess_conf is None\n else args.preprocess_conf,\n preprocess_args={\"train\": False},\n )\n logging.warning(f'args.rnnlm: {args.rnnlm}')\n\n if args.rnnlm:\n lm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)\n # NOTE: for a compatibility with less than 0.5.0 version models\n lm_model_module = getattr(lm_args, \"model_module\", \"default\")\n lm_class = dynamic_import_lm(lm_model_module, lm_args.backend)\n lm = lm_class(len(train_args.char_list), lm_args)\n torch_load(args.rnnlm, lm)\n lm.eval()\n else:\n lm = None\n\n if args.ngram_model:\n from espnet.nets.scorers.ngram import NgramFullScorer\n from espnet.nets.scorers.ngram import NgramPartScorer\n\n if args.ngram_scorer == \"full\":\n ngram = NgramFullScorer(args.ngram_model, train_args.char_list)\n else:\n ngram = NgramPartScorer(args.ngram_model, train_args.char_list)\n else:\n ngram = None\n\n scorers = model.scorers()\n \n scorers[\"lm\"] = lm\n scorers[\"ngram\"] = ngram\n scorers[\"length_bonus\"] = LengthBonus(len(train_args.char_list))\n weights = dict(\n decoder=1.0 - args.ctc_weight,\n ctc=args.ctc_weight,\n lm=args.lm_weight,\n ngram=args.ngram_weight,\n length_bonus=args.penalty,\n )\n beam_search = BeamSearch(\n beam_size=args.beam_size,\n vocab_size=len(train_args.char_list),\n weights=weights,\n scorers=scorers,\n sos=model.sos,\n eos=model.eos,\n token_list=train_args.char_list,\n pre_beam_score_key=None if args.ctc_weight == 1.0 else \"full\",\n )\n # TODO(karita): make all scorers batchfied\n if args.batchsize == 1:\n non_batch = [\n k\n for k, v in beam_search.full_scorers.items()\n if not isinstance(v, BatchScorerInterface)\n ]\n if len(non_batch) == 0:\n beam_search.__class__ = BatchBeamSearch\n logging.info(\"BatchBeamSearch implementation is selected.\")\n else:\n logging.warning(\n f\"As non-batch scorers {non_batch} are found, \"\n f\"fall back to non-batch implementation.\"\n )\n\n if args.ngpu > 1:\n raise NotImplementedError(\"only single GPU decoding is supported\")\n if args.ngpu == 1:\n device = \"cuda\"\n else:\n device = \"cpu\"\n dtype = getattr(torch, args.dtype)\n logging.info(f\"Decoding device={device}, 
dtype={dtype}\")\n model.to(device=device, dtype=dtype).eval()\n beam_search.to(device=device, dtype=dtype).eval()\n\n js = read_json_data(model.args, args.recog_json)\n # read json data\n # with open(args.recog_json, \"rb\") as f:\n # js = json.load(f)[\"utts\"]\n\n random.seed(args.seed)\n items = list(js.items())\n random.shuffle(items)\n js = OrderedDict(items[:args.recog_size])\n logging.warning(f'data json len {len(js)}')\n\n import re\n def get_lang(name):\n s = name.split('_')[0]\n s = re.sub(r'\\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s\n return s\n\n new_js = {}\n with torch.no_grad():\n for idx, name in enumerate(js.keys(), 1):\n logging.info(\"(%d/%d) decoding \" + name, idx, len(js.keys()))\n\n lang_labels = None\n lang_labels_for_masking = None\n if args.lang_label:\n lang_label = get_lang(name)\n if args.mask_phoneme:\n lang_labels_for_masking = [lang_label] # true lang labels\n if args.fake_lang_label:\n lang_labels = [args.fake_lang_label]\n\n batch = [(name, js[name])]\n feat = load_inputs_and_targets(batch)[0][0]\n enc = model.encode(torch.as_tensor(feat).to(device=device, dtype=dtype), lang_labels=lang_labels)\n\n nbest_hyps = beam_search(\n x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio, \n mask_phoneme=args.mask_phoneme, lang_labels_for_masking=lang_labels_for_masking\n )\n\n nbest_hyps = [\n h.asdict() for h in nbest_hyps[: min(len(nbest_hyps), args.nbest)]\n ]\n new_js[name] = add_results_to_json(\n js[name], nbest_hyps, train_args.char_list\n )\n\n with open(args.result_label, \"wb\") as f:\n f.write(\n json.dumps(\n {\"utts\": new_js}, indent=4, ensure_ascii=False, sort_keys=True\n ).encode(\"utf_8\")\n )",
"def process_rhs_and_algebraic(self, model):\n\n # Discretise right-hand sides, passing domain from variable\n processed_rhs = self.process_dict(model.rhs)\n\n # Concatenate rhs into a single state vector\n # Need to concatenate in order as the ordering of equations could be different\n # in processed_rhs and model.rhs\n processed_concatenated_rhs = self._concatenate_in_order(processed_rhs)\n\n # Discretise and concatenate algebraic equations\n processed_algebraic = self.process_dict(model.algebraic)\n\n # Concatenate algebraic into a single state vector\n # Need to concatenate in order as the ordering of equations could be different\n # in processed_algebraic and model.algebraic\n processed_concatenated_algebraic = self._concatenate_in_order(\n processed_algebraic\n )\n\n return (\n processed_rhs,\n processed_concatenated_rhs,\n processed_algebraic,\n processed_concatenated_algebraic,\n )"
] |
[
"0.5840182",
"0.55535984",
"0.51799554",
"0.5069606",
"0.5051608",
"0.50513303",
"0.49753022",
"0.49660093",
"0.49515072",
"0.4919437",
"0.48867586",
"0.48522",
"0.48373777",
"0.4799141",
"0.47971752",
"0.47930145",
"0.4788843",
"0.4775847",
"0.47617608",
"0.47600308",
"0.47465056",
"0.47294545",
"0.46840632",
"0.4668002",
"0.46519452",
"0.46514848",
"0.46459648",
"0.4643576",
"0.4626132",
"0.46193093"
] |
0.5980429
|
0
|
Removes duplicate rewrite rule objects from the rule set. This function preserves the order of the rewrite rules in the rule set and deduplicates in place by keeping only the last occurrence of each duplicate rule.
|
def _remove_duplicate_rules(rule_set: _RewriteRuleSet) -> None:
  RuleKey = Tuple[str, str, str, str]
  def _key_and_value(rule: _RewriteRule) -> Tuple[RuleKey, _RewriteRule]:
    return (rule.from_state, rule.to_state, rule.input, rule.output), rule
  inverted = collections.OrderedDict(map(_key_and_value, rule_set.rule))
  duplicate_count = len(rule_set.rule) - len(inverted)
  if duplicate_count:
    logging.info(
        f"found {duplicate_count} duplicate rewrite rules, removing them")
    rule_set.ClearField("rule")
    rule_set.rule.extend([r for r in inverted.values()])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def merge_duplicated_links (self):\n # Collect backward links\n backwards = [(src, dst, key) for src, dst, key, link in\n self.network.edges_iter(keys=True, data=True) if (\n link.type == Link.STATIC or link.type == Link.DYNAMIC) and\n link.backward is True]\n # Delete backwards links\n for link in backwards:\n self.network.remove_edge(*link)\n return self",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def cleanUpRules(self):\n\n\t\t# initialize\n\t\tscoreDict = {}\n\t\tnewRules = {}\n\n\t\t# loop through rules\n\t\tfor i, tup in enumerate(self.generatedRules):\n\n\n\t\t\tantecedent = str(tup[0].antecedent)\n\n\t\t\t# if there is no rule in the scoredictionary yet with the same antecedent, put it in both dictionaries\n\t\t\tif (not antecedent in scoreDict):\n\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\telse:\n\n\t\t\t\t# if there is, then first compare if the degree is higher before overwriting\n\t\t\t\tif (tup[1] > scoreDict[antecedent]):\n\t\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\t\telse:\n\t\t\t\t\t# not higher? don't overwrite\n\t\t\t\t\tcontinue\n\n\t\t# save rules\n\t\tself.generatedRules = []\n\t\tfor key in newRules:\n\t\t\tself.generatedRules.append(newRules[key])\n\n\t\treturn",
"def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)",
"def rel_duplicates():\n path = f'{output_path}/ppt/_rels/presentation.xml.rels'\n root, tree = gen_tree(path)\n d1 = OrderedDict()\n for relation in root:\n rIds = []\n attrib = relation.attrib\n if attrib['Target'] in d1.keys():\n val = d1[attrib['Target']]\n val.append(attrib['Id'])\n d1[attrib['Target']] = val\n else:\n d1[attrib['Target']] = [attrib['Id']]\n \n # getting duplicates rIds\n dup_rIds = []\n for k,v in d1.items():\n if len(v) > 1:\n dup_rIds.append(v.pop(0))\n d1[k] = v\n \n # removing relation\n for relation in root:\n attrib = relation.attrib\n if attrib['Id'] in dup_rIds:\n root.remove(relation)\n \n rels_rIds = [relation.attrib['Id'] for relation in root]\n \n tree.write(path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return d1, rels_rIds",
"def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']",
"def remove_duplicates(self):\n cur = self.head\n prev = None\n\n dup_values = dict()\n\n while cur:\n if cur.data in dup_values:\n # Remove node:\n prev.next = cur.next\n else:\n # Have not encountered element before.\n dup_values[cur.data] = 1\n prev = cur\n cur = prev.next",
"def remove_duplicates(self, tree={}):\n if not tree: tree=self.dict\n childrens = tree['childrens']\n for child in childrens:\n self.remove_duplicates(tree=child)\n key = f'{child[\"app_name\"]}_{child[\"model_name\"]}_{child[\"source_pk\"]}'\n if key in self.depth_reference:\n if child['depth'] < self.depth_reference[key]:\n child['save'] = False",
"def remove_dup2(linkedlist):",
"def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)",
"def rm_duplicates(self):\n # get uniq representation of existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)",
"def removeDuplicateUrl(inputfile, outputfile):\n\t\n\tlines_seen = set()\n\toutfile = open(outputfile, \"w\")\n\tfor line in open(inputfile, \"r\"):\n \t\tif line not in lines_seen:\n\t\t\toutfileput.write(line)\n\t\t\tlines_seen.add(line)\n\n\toutputfile.close()",
"def remove_dups(ll: SinglyLinkedList):\n seen = set()\n current = ll.head\n prev = None\n while current is not None:\n if current.data in seen:\n prev.next = current.next\n temp = current\n current = current.next\n temp.next = None\n else:\n seen.add(current.data)\n prev = current\n current = current.next",
"def remove_duplicates(self):\n names: Dict[str, int] = dict()\n for step in self.Sequence:\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name != '':\n if name not in names:\n names[name] = 1\n else:\n names[name] += 1\n for step in reversed(self.Sequence):\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name and (names[name] > 1):\n names[name] -= 1\n step.Name = name + \"_%i\" % names[name]",
"def dedup_rhs(self,inline_stop=set(),verbose=False):\n\n # Map an object index to the nonterminal that first defines it.\n index_to_name = dict()\n # Map a rule name to the rule name it should be replaced by.\n replacement = dict()\n\n def process_replacement(grammar,name,replacement_dict):\n # Update this rule with any scheduled replacements.\n rule = self.rules[name]\n changed_rule = False\n new_options = []\n for option in rule.as_container():\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.append(self.MakeSymbolName(replacement[x.content]))\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[name] = self.MakeChoice(new_options)\n\n for A in reversed(self.preorder()):\n if A not in inline_stop:\n A_rule = self.rules[A]\n A_index = A_rule.reg_info.index\n if verbose:\n print(\" {} {} \".format(A,A_index))\n if A_index in index_to_name:\n if verbose:\n print(\"Replace {} with {}\".format(A,index_to_name[A_index]))\n replacement[A] = index_to_name[A_index]\n else:\n index_to_name[A_index] = A\n process_replacement(self,A,replacement)\n\n\n for A in self.preorder():\n process_replacement(self,A,replacement)\n\n self.remove_unused_rules()",
"def remove_duplicate_walls(self):\n wall_map = {}\n duplicates = []\n for cnt, thing in enumerate(self.things):\n if isinstance(thing, Wall):\n if not wall_map.has_key(thing.location):\n wall_map[thing.location] = True\n else:\n duplicates.append(cnt)\n for cnt, item in enumerate(duplicates):\n self.things.pop(item - cnt)",
"def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]",
"def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0",
"def removeDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return self[ind[ok]]",
"def remove_duplicates(self) -> bool:\n return self._remove_duplicates",
"def duplicateClean(obj=None, name=None):\n #Delete Unnecessary 'Orig' Nodes\n import fnmatch\n #duplicate obj\n if obj is None:\n obj = pm.ls(sl=1)[0]\n if name ==None:name = None\n dup = pm.duplicate(obj, n = name)[0]\n cleanUpAttr(sel=[dup],listAttr=['tx','ty','tz','rx','ry','rz','sx','sy','sz'],l=0,k=1,cb=0)\n nodes = pm.ls(dup,dag=1)\n for obj in nodes:\n if fnmatch.fnmatch(obj.name(),'*Orig*'):\n if len(pm.listConnections(obj))==0:\n pm.delete( obj)\n print 'delete unused node \"' +obj+'\" from this scene'\n return dup",
"def _resolve_duplicates(self) -> None:\n resource_ids_resources: DefaultDict[str, List[Resource]] = defaultdict(list)\n for resource in self.resources:\n resource_ids_resources[resource.resource_id].append(resource)\n merged_resources: List[Resource] = []\n for resource_id, resources in resource_ids_resources.items():\n if len(resources) > 1:\n merged_resource = ResourceSpec.merge_resources(\n resource_id=resource_id, resources=resources\n )\n merged_resources.append(merged_resource)\n for merged_resource in merged_resources:\n self.resources = [\n resource\n for resource in self.resources\n if resource.resource_id != merged_resource.resource_id\n ]\n self.resources.append(merged_resource)",
"def rmdup(sll):\n seen = set()\n prev = None\n current = sll.head\n while current:\n if current.payload in seen:\n prev.next_ = current.next_\n current = current.next_\n else:\n seen.add(current.payload)\n prev = current\n current = current.next_\n return sll # for chaining",
"def remove_sorted_duplicates(self):\n cur = self.head\n while cur is not None and cur.next is not None:\n if cur.next.data == cur.data:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return self.head",
"def one_time_rules(self):\n # There is also a hidden sameAs rule in RDF Semantics: if a literal appears in a triple, and another one has\n # the same value, then the triple should be duplicated with the other value.\n literals = self.literal_proxies.lit_to_bnode\n items = ((lt1, lt2) for lt1, lt2 in product(literals, literals) if lt1 != lt2)\n for lt1, lt2 in items:\n try:\n lt1_d = lt1.lit.toPython()\n lt2_d = lt2.lit.toPython()\n if lt1_d == lt2_d:\n # In OWL, this line is simply stating a sameAs for the corresponding BNodes, and then let\n # the usual rules take effect. In RDFS this is not possible, so the sameAs rule is,\n # essentially replicated...\n bn1 = self.literal_proxies.lit_to_bnode[lt1]\n bn2 = self.literal_proxies.lit_to_bnode[lt2]\n for (s, p, o) in self.graph.triples((None, None, bn1)):\n self.graph.add((s, p, bn2))\n except:\n # there may be a problem with one of the python conversions; the rule is imply ignored\n # raise e\n pass",
"def _filter_duplicate_urls(urls: list) -> set:\n clean_urls = set()\n for url in urls:\n cleaned_url = url.split(\"&sa=U\")[0]\n clean_urls.add(cleaned_url)\n return clean_urls",
"def remdup_preserve_order(lst):\n val = set()\n val_add = val.add\n return [x for x in lst if not ((x in val) or val_add(x))]",
"def remove_duplicates_orderly(cls, list_with_duplicates: list, preserve_first_encounter: bool = True,\n\t\t\t\t\t\t\t\t preserve_original_list: bool = False) -> list:\n\t\tlist_set = set(list_with_duplicates)\n\t\tlist_new = list_with_duplicates.copy() if preserve_original_list else list_with_duplicates\n\t\tif len(list_new) == len(list_set): # No extra\n\t\t\treturn list_new\n\t\tif preserve_first_encounter:\n\t\t\tlist_new.reverse()\n\t\tfor index in range(len(list_new) - 1, -1, -1):\n\t\t\titem = list_new[index]\n\t\t\tif item in list_set:\n\t\t\t\tlist_set.remove(item)\n\t\t\telse:\n\t\t\t\tlist_new.pop(index)\n\t\tif preserve_first_encounter:\n\t\t\tlist_new.reverse()\n\t\treturn list_new",
"def _deduplicate(self):\n if self._clean:\n return\n\n sorted_entries = sorted(\n self._entries, key=lambda entry: (entry.depth, -len(entry.tail))\n )\n\n self._entries = []\n for entry in sorted_entries:\n if any(entry.startswith(p) for p in self._entries):\n continue\n self._entries.append(entry)\n self._clean = True",
"def _trim_duplicates(all_matches):\n trimmed_list = IndexedSet()\n for match in all_matches:\n if (\n match\n and match not in trimmed_list\n and match[::-1] not in trimmed_list\n ):\n trimmed_list.add(match)\n return trimmed_list"
] |
[
"0.6079783",
"0.5707271",
"0.5631043",
"0.5405995",
"0.5405514",
"0.536701",
"0.53547156",
"0.5305289",
"0.52767164",
"0.52653193",
"0.52208674",
"0.5214076",
"0.5193104",
"0.5186206",
"0.51850516",
"0.5170991",
"0.5167085",
"0.51482874",
"0.51319295",
"0.5107508",
"0.5105704",
"0.50745183",
"0.5057753",
"0.50475216",
"0.50281197",
"0.4996971",
"0.49671713",
"0.49635142",
"0.4962762",
"0.4955761"
] |
0.8605161
|
0
|
Extracts the FST symbols that compose the complex input label of a rewrite rule. The FST symbols of a complex input label are: the epsilon symbol if the complex input label is an epsilon symbol (e.g. [''] for the label ''); the digits of the complex input label if it is composed only of digits without any feature analysis tags (e.g. ['9', '0'] for the label '90'); or the tokenized inflectional group boundaries, inflectional or derivational morphemes, proper noun and feature analysis tags, numbers, and punctuation if the complex input label is composed of these units (e.g. [')([VN]', 'YAn[Derivation=PresNom]'] for the label ')([VN]YAn[Derivation=PresNom]').
|
def _symbols_of_input(label: str) -> List[str]:
  if label == common.EPSILON:
    return [label]
  # We add a state transition arc for each digit of a multi-digit number.
  if "[" not in label:
    return list(label)
  # We add a state transition arc for each inflectional or derivational
  # morpheme, inflectional group boundary, and proper noun analysis tag.
  return _SYMBOLS_REGEX.findall(label)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _transform_compound(self, compound):\n assert isinstance(compound, str), \"Input is not a string!\"\n cmpd_features = np.array(compound_short_descriptors(compound),\n dtype=np.float)\n cmpd_features = np.pad(cmpd_features, (0, 80-cmpd_features.shape[0]),\n mode='constant')\n cmpd_features = np.nan_to_num(cmpd_features)\n\n return cmpd_features",
"def label_to_symbol(label: str, all_labels: list) -> str:\n index = all_labels.index(label)\n in_symbol = f\"[i-{index}]\"\n out_symbol = f\"[o-{index}]\"\n return in_symbol, out_symbol",
"def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals",
"def _symbols_of_output(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a new state transition arc for each character of the output token.\n return list(label)",
"def transform(token):\n if token == '#t':\n return True\n if token == '#f':\n return False\n if token[0] == '\"':\n return bytes(token[1:-1], \"utf-8\").decode('unicode-escape')\n if token.startswith(';'):\n return ';'\n if token.startswith('#b'):\n return int(token[2:], 2)\n if token.startswith('#o'):\n return int(token[2:], 8)\n if token.startswith('#d'):\n return int(token[2:])\n if token.startswith('#x'):\n return int(token[2:], 16)\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n try:\n result = complex(token.replace('i', 'j'))\n # user can't write a+bj and form like i, 2i, 3i where no '+' appers\n if token.find('j') >= 0 or token.find('+') < 0:\n return Symbol(token.lower())\n return result\n except ValueError:\n try:\n return fractions.Fraction(token)\n except ValueError:\n return Symbol(token.lower())",
"def BNF():\n\n sect_begin = Literal(\"{\").suppress()\n sect_end = Literal(\"}\").suppress()\n array_begin = Literal(\"[\").suppress()\n array_end = Literal(\"]\").suppress()\n tag_begin = Literal(\"<\").suppress()\n tag_end = Literal(\">\").suppress()\n eql = Literal(\"=\").suppress()\n dmark = Literal('$').suppress()\n end_data = Literal('$end').suppress()\n prtable = alphanums + r'!$%&*+-./<>?@^_|~'\n int_t = Regex('[-]?\\d+')\n float_t = Regex('-?\\d+\\.\\d*([eE]?[+-]?\\d+)?')\n bool_t = Regex('([Yy]es|[Nn]o|[Tt]rue|[Ff]alse|[Oo]n|[Oo]ff)')\n\n # Helper definitions\n kstr = quotedString.setParseAction(\n removeQuotes) ^ float_t ^ int_t ^ bool_t ^ Word(prtable)\n name = Word(alphas + \"_\", alphanums + \"_\")\n vec = array_begin + delimitedList(\n float_t ^ int_t ^ bool_t ^ Word(prtable) ^ Literal(\"\\n\").suppress() ^\n quotedString.setParseAction(removeQuotes)) + array_end\n sect = name + sect_begin\n tag_sect = name + Group(tag_begin + name + tag_end) + sect_begin\n\n # Grammar\n keyword = name + eql + kstr\n vector = name + eql + vec\n data = Combine(dmark + name) + SkipTo(end_data) + end_data\n #section = Forward()\n sect_def = (sect | tag_sect)\n #input = section | data | vector | keyword\n input = sect_def | data | vector | keyword | sect_end\n #section << sect_def + ZeroOrMore(input) + sect_end\n\n # Parsing actions\n int_t.setParseAction(token_actions.to_int)\n float_t.setParseAction(token_actions.to_float)\n bool_t.setParseAction(token_actions.to_bool)\n keyword.setParseAction(token_actions.to_scalar)\n vector.setParseAction(token_actions.to_array)\n data.setParseAction(token_actions.to_data)\n sect.setParseAction(token_actions.to_section)\n tag_sect.setParseAction(token_actions.to_section)\n sect_end.setParseAction(token_actions.end_of_section)\n\n bnf = ZeroOrMore(input) + StringEnd().setFailAction(\n token_actions.parse_error)\n bnf.ignore(pythonStyleComment)\n\n return Dict(bnf)",
"def prepare_label_feature(self, label2id: dict):\n text, wp_text, label, wp_label, wp_mark = [], [], [], [], []\n sorted_labels = sorted(label2id.items(), key=lambda x: x[1])\n for label_name, label_id in sorted_labels:\n if label_name == '[PAD]':\n continue\n tmp_text = self.convert_label_name(label_name)\n tmp_wp_text = self.tokenizer.tokenize(' '.join(tmp_text))\n text.extend(tmp_text)\n wp_text.extend(tmp_wp_text)\n label.extend(['O'] * len(tmp_text))\n wp_label.extend(['O'] * len(tmp_wp_text))\n wp_mark.extend([0] + [1] * (len(tmp_wp_text) - 1))\n label_item = self.data_item2feature_item(DataItem(text, label, wp_text, wp_label, wp_mark), 0)\n label_input = self.get_test_model_input(label_item)\n return label_input, label_item",
"def feature(root, suffix):\r\n if suffix == '$':\r\n return ('$', suffix)\r\n return (root[-1], suffix[0])",
"def symbol_to_label(symbol: str, all_labels: list) -> str:\n m = re.search(\"[i-(\\d+)]\", symbol)\n n = re.search(\"[o-(\\d+)]\", symbol)\n if m is None and n is None:\n raise ValueError(f\"Symbol {symbol} fails to match symbol regex\")\n elif m is not None:\n return all_labels[m.group(1)]\n else:\n return all_labels[n.group(1)]",
"def mathmode(strng):\n mathexp = (\n # forma: a^{b}\n re.compile(r\"([\\^])[{]([^}\\$]+)[}]\"), re.compile(r\"([_])[{]([^}$]+)[}]\"),\n # forma: a^\\beta\n re.compile(r\"([\\^])(\\\\[\\w]+)\"), re.compile(r\"([_])(\\\\[\\w]+)\"),\n # forma: a^b\n re.compile(r\"([\\^])([^\\{\\\\\\$])\"), re.compile(r\"([_])([^\\$\\{\\\\])\")\n )\n for i in mathexp:\n strng = i.sub(r\"$\\1{\\2}$\", strng)\n return strng",
"def create_nfa_from_postfix(regex: str):\n\n nfa_stack = []\n\n for char in regex:\n if char == '.':\n # to concat two nfas, add an epsilon arrow from every accepting state\n # of the first to the start state of the second and turn all accepting states\n # of the first into non accepting states\n\n if len(nfa_stack) < 2:\n raise InvalidRegexException()\n\n nfa2 = nfa_stack.pop()\n nfa1 = nfa_stack.pop()\n\n if nfa2.is_one_character_nfa:\n nfa2_matched_character, nfa2_accept_state = nfa2.start_state.transitions[0]\n for accept_state in nfa1.accept_states:\n accept_state.add_transition(nfa2_matched_character, nfa2_accept_state)\n accept_state.is_accepting = False\n\n else:\n for accept_state in nfa1.accept_states:\n accept_state.add_transition('eps', nfa2.start_state)\n accept_state.is_accepting = False\n\n\n nfa1.accept_states = nfa2.accept_states\n nfa1.is_one_character_nfa = False\n nfa_stack.append(nfa1)\n\n # for garbage collection\n nfa2.start_state = None\n nfa2.accept_states = None\n elif char == '*':\n # to apply a kleene star to an nfa, add a new start state, which is also an accept state,\n # to the nfa with an epsilon arrow going into the original start state.\n # add epsilon arrows from every accept state to the original start state\n\n if len(nfa_stack) < 1:\n raise InvalidRegexException()\n\n nfa = nfa_stack.pop()\n new_start_state = State([('eps', nfa.start_state)], True)\n for accept_state in nfa.accept_states:\n accept_state.add_transition('eps', nfa.start_state)\n\n nfa.accept_states.append(new_start_state)\n nfa.start_state = new_start_state\n nfa.is_one_character_nfa = False\n nfa_stack.append(nfa)\n\n elif char == '+':\n # TODO try this out on paper\n # we add epsilon arrows from every accept state to the start state\n\n if len(nfa_stack) < 1:\n raise InvalidRegexException()\n\n nfa = nfa_stack.pop()\n for accept_state in nfa.accept_states:\n accept_state.add_transition('eps', nfa.start_state)\n\n nfa.is_one_character_nfa = False\n nfa_stack.append(nfa)\n elif char == '|':\n # we apply the union operation by adding a new non accepting start state with\n # epsilon arrows going into the start state of each operand nfa\n\n if len(nfa_stack) < 2:\n raise InvalidRegexException()\n\n nfa2 = nfa_stack.pop()\n nfa1 = nfa_stack.pop()\n\n new_start_state = State([('eps', nfa1.start_state), ('eps', nfa2.start_state)], False)\n\n nfa1.start_state = new_start_state\n nfa1.accept_states.extend(nfa2.accept_states)\n nfa1.is_one_character_nfa = False\n nfa_stack.append(nfa1)\n\n # for garbage collection\n nfa2.start_state = None\n nfa2.accept_states = None\n else:\n # character from the alphabet\n accept_state = State([], True)\n start_state = State([(char, accept_state)], False)\n nfa_stack.append(NFA(start_state, [accept_state], True))\n\n if len(nfa_stack) != 1:\n raise InvalidRegexException()\n\n return nfa_stack[0]",
"def prepare_sep_label_feature(self, label2id):\n label_items = []\n for label_name in label2id:\n if label_name == '[PAD]':\n continue\n text = self.convert_label_name(label_name)\n wp_text = self.tokenizer.tokenize(' '.join(text))\n wp_label = ['O'] * len(wp_text)\n label = ['O'] * len(wp_text)\n wp_mark = [0] + [1] * (len(wp_text) - 1)\n label_items.append(self.data_item2feature_item(DataItem(text, label, wp_text, wp_label, wp_mark), 0))\n label_input = self.get_support_model_input(label_items, len(label2id) - 1) # no pad, so - 1\n return label_input, label_items",
"def getFeatures(c):\n\n\n feature_list = []\n lc_rc_list = []\n w1 = c.getStack(0)\n w2 = c.getStack(1)\n w3 = c.getStack(2)\n b1 = c.getBuffer(0)\n b2 = c.getBuffer(1)\n b3 = c.getBuffer(2)\n for i in [w1, w2]: #12\n lc = c.getLeftChild(i,1) # 1 st left child of the word on the stack.\n rc = c.getRightChild(i,1) # 1 st right child of the word on the stack.\n lc_rc_list.append(lc)\n lc_rc_list.append(rc)\n lc_rc_list.append(c.getLeftChild(i,2)) # 2 nd left child of the word on the stack\n lc_rc_list.append(c.getRightChild(i,2)) # 2 nd right child of the word on the stack\n lc_rc_list.append(c.getLeftChild(lc,1)) # 1 st left child of the left child of the word on the stack\n lc_rc_list.append(c.getRightChild(rc,1)) # 1 st right child of the right child of the word on the stack\n ########################### 18 Word Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getWordID(c.getWord(i))) # 6 words of the stack and buffer\n\n for i in lc_rc_list: #12 words of the tree\n feature_list.append(getWordID(c.getWord(i)))\n\n ########################### 18 Tag Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getPosID(c.getPOS(i))) # 6 tags of the owrds on the stack and the buffer\n\n for i in lc_rc_list:\n feature_list.append(getPosID(c.getPOS(i))) #12 tags of the words onthe stack and the buffer.\n ########################### 12 label Features ###########################\n for i in lc_rc_list:\n feature_list.append(getLabelID(c.getLabel(i))) #12 labels of the words on the stack and the buffer.\n\n\n return feature_list",
"def test_complex_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"complex Beta = -0.231+5.21j\")\n assert bb._var == {\"Beta\": -0.231 + 5.21j}",
"def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )",
"def test_complex_exponent_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"complex Beta = -0.231e-6+5.21e-2j\")\n assert bb._var == {\"Beta\": -0.231e-6 + 5.21e-2j}",
"def FeatureExtraction(Label, In, Ic, W, K=128, Fs=6, Delta=8):\n\n # get total regions\n NumofLabels = Label.max()\n\n # get Label size x\n size_x = Label.shape[0]\n\n # initialize centroids\n CentroidX = []\n CentroidY = []\n\n # initialize morphometry features\n Area = []\n Perimeter = []\n Eccentricity = []\n Circularity = []\n MajorAxisLength = []\n MinorAxisLength = []\n Extent = []\n Solidity = []\n\n # initialize FSD feature group\n FSDGroup = np.zeros((NumofLabels, Fs))\n\n # initialize Nuclei, Cytoplasms\n Nuclei = [[] for i in range(NumofLabels)]\n Cytoplasms = [[] for i in range(NumofLabels)]\n\n # create round structuring element\n Disk = disk(Delta)\n\n # initialize panda dataframe\n df = pd.DataFrame()\n\n # fourier descriptors, spaced evenly over the interval 1:K/2\n Interval = np.round(\n np.power(\n 2, np.linspace(0, math.log(K, 2)-1, Fs+1, endpoint=True)\n )\n ).astype(np.uint8)\n\n # extract feature information\n for region in regionprops(Label):\n # add centroids\n CentroidX = np.append(CentroidX, region.centroid[0])\n CentroidY = np.append(CentroidY, region.centroid[1])\n # add morphometry features\n Area = np.append(Area, region.area)\n Perimeter = np.append(Perimeter, region.perimeter)\n Eccentricity = np.append(Eccentricity, region.eccentricity)\n if region.perimeter == 0:\n Circularity = np.append(Circularity, 0)\n else:\n Circularity = np.append(\n Circularity,\n 4 * math.pi * region.area / math.pow(region.perimeter, 2)\n )\n MajorAxisLength = np.append(MajorAxisLength, region.major_axis_length)\n MinorAxisLength = np.append(MinorAxisLength, region.minor_axis_length)\n Extent = np.append(Extent, region.extent)\n Solidity = np.append(Solidity, region.solidity)\n # get bounds of dilated nucleus\n bounds = GetBounds(region.bbox, Delta, size_x)\n # grab nucleus mask\n Nucleus = (\n Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] == region.label\n ).astype(np.uint8)\n # find nucleus boundaries\n Bounds = np.argwhere(\n find_boundaries(Nucleus, mode=\"inner\").astype(np.uint8) == 1\n )\n # calculate and add FSDs\n FSDGroup[region.label-1, :] = FSDs(\n Bounds[:, 0], Bounds[:, 1],\n K, Interval\n )\n # generate object coords for nuclei and cytoplasmic regions\n Nuclei[region.label-1] = region.coords\n # get mask for all nuclei in neighborhood\n Mask = (\n Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] > 0\n ).astype(np.uint8)\n # remove nucleus region from cytoplasm+nucleus mask\n cytoplasm = (\n np.logical_xor(Mask, dilation(Nucleus, Disk))\n ).astype(np.uint8)\n # get list of cytoplasm pixels\n Cytoplasms[region.label-1] = GetPixCoords(cytoplasm, bounds)\n\n # calculate hematoxlyin features, capture feature names\n HematoxylinIntensityGroup = IntensityFeatureGroup(In, Nuclei)\n HematoxylinTextureGroup = TextureFeatureGroup(In, Nuclei)\n HematoxylinGradientGroup = GradientFeatureGroup(In, Nuclei)\n # calculate eosin features\n EosinIntensityGroup = IntensityFeatureGroup(Ic, Cytoplasms)\n EosinTextureGroup = TextureFeatureGroup(Ic, Cytoplasms)\n EosinGradientGroup = GradientFeatureGroup(Ic, Cytoplasms)\n\n # add columns to dataframe\n df['X'] = CentroidX\n df['Y'] = CentroidY\n\n df['Area'] = Area\n df['Perimeter'] = Perimeter\n df['Eccentricity'] = Eccentricity\n df['Circularity'] = Circularity\n df['MajorAxisLength'] = MajorAxisLength\n df['MinorAxisLength'] = MinorAxisLength\n df['Extent'] = Extent\n df['Solidity'] = Solidity\n\n for i in range(0, Fs):\n df['FSD' + str(i+1)] = FSDGroup[:, i]\n\n for f in HematoxylinIntensityGroup._fields:\n df['Hematoxylin' + f] = 
getattr(HematoxylinIntensityGroup, f)\n\n for f in HematoxylinTextureGroup._fields:\n df['Hematoxylin' + f] = getattr(HematoxylinTextureGroup, f)\n\n for f in HematoxylinGradientGroup._fields:\n df['Hematoxylin' + f] = getattr(HematoxylinGradientGroup, f)\n\n for f in EosinIntensityGroup._fields:\n df['Cytoplasm' + f] = getattr(EosinIntensityGroup, f)\n\n for f in EosinTextureGroup._fields:\n df['Cytoplasm' + f] = getattr(EosinTextureGroup, f)\n\n for f in EosinGradientGroup._fields:\n df['Cytoplasm' + f] = getattr(EosinGradientGroup, f)\n\n return df",
"def split_symbols_custom(predicate: Callable[[str], bool]):\n def _split_symbols(tokens: List[TOKEN], local_dict: DICT, global_dict: DICT):\n result: List[TOKEN] = []\n split = False\n split_previous=False\n\n for tok in tokens:\n if split_previous:\n # throw out closing parenthesis of Symbol that was split\n split_previous=False\n continue\n split_previous=False\n\n if tok[0] == NAME and tok[1] in ['Symbol', 'Function']:\n split = True\n\n elif split and tok[0] == NAME:\n symbol = tok[1][1:-1]\n\n if predicate(symbol):\n tok_type = result[-2][1] # Symbol or Function\n del result[-2:] # Get rid of the call to Symbol\n\n i = 0\n while i < len(symbol):\n char = symbol[i]\n if char in local_dict or char in global_dict:\n result.append((NAME, \"%s\" % char))\n elif char.isdigit():\n chars = [char]\n for i in range(i + 1, len(symbol)):\n if not symbol[i].isdigit():\n i -= 1\n break\n chars.append(symbol[i])\n char = ''.join(chars)\n result.extend([(NAME, 'Number'), (OP, '('),\n (NAME, \"'%s'\" % char), (OP, ')')])\n else:\n use = tok_type if i == len(symbol) else 'Symbol'\n result.extend([(NAME, use), (OP, '('),\n (NAME, \"'%s'\" % char), (OP, ')')])\n i += 1\n\n # Set split_previous=True so will skip\n # the closing parenthesis of the original Symbol\n split = False\n split_previous = True\n continue\n\n else:\n split = False\n\n result.append(tok)\n\n return result\n\n return _split_symbols",
"def test_symbol_repr(self):\n a = pybamm.Symbol(\"a\")\n b = pybamm.Symbol(\"b\")\n c = pybamm.Symbol(\"c\", domain=[\"test\"])\n d = pybamm.Symbol(\"d\", domain=[\"test\"])\n hex_regex = r\"\\-?0x[0-9,a-f]+\"\n self.assertRegex(\n a.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", a, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n b.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", b, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n c.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", c, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n d.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", d, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n (a + b).__repr__(),\n r\"Addition\\(\" + hex_regex + r\", \\+, children\\=\\['a', 'b'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n (c * d).__repr__(),\n r\"Multiplication\\(\"\n + hex_regex\n + r\", \\*, children\\=\\['c', 'd'\\], domain=\\['test'\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(a).__repr__(),\n r\"Gradient\\(\" + hex_regex + \", grad, children\\=\\['a'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(c).__repr__(),\n r\"Gradient\\(\"\n + hex_regex\n + \", grad, children\\=\\['c'\\], domain=\\['test'\\]\\)\",\n )",
"def get_labels(orthographic: str):\n labels = []\n tmp = ''\n tag = False\n\n # Get all labels from orthographic form\n for char in orthographic:\n if char == '[':\n tag = True\n elif char == ']':\n labels.append(tmp)\n tag = False\n tmp = ''\n elif tag:\n tmp += char\n return labels",
"def stoichiometry_func_doc(self, label):\n air = self.air_alias.val\n fuel = self.fuel_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n\n equations = ''\n for fluid in self.inl[0].fluid.val.keys():\n\n in1 = r'\\dot{m}_\\mathrm{in,1} \\cdot x_\\mathrm{fluid,in,1}'\n in2 = r'\\dot{m}_\\mathrm{in,2} \\cdot x_\\mathrm{fluid,in,2}'\n out = r'\\dot{m}_\\mathrm{out,1} \\cdot x_\\mathrm{fluid,out,1}'\n\n if fluid == air:\n latex = (\n r'0=\\Delta\\dot{m}_\\mathrm{' + fluid + r'} - '\n r'\\dot{m}_\\mathrm{' + fluid + r',stoich}'\n )\n elif fluid == fuel:\n latex = (\n r'0=\\Delta \\dot{m}_\\mathrm{' + fluid + r'} - '\n r'\\dot{m}_\\mathrm{' + fluid + r'}'\n )\n elif fluid == flue_gas:\n latex = (\n r'0=\\Delta \\dot{m}_\\mathrm{' + fluid.replace('_', ',') +\n r'} + \\dot{m}_\\mathrm{' + fuel + r'} +'\n r'\\dot{m}_\\mathrm{' + air + r',stoich}'\n )\n else:\n latex = r'0 = \\Delta \\dot{m}_\\mathrm{' + fluid + '}'\n\n if fluid == next(iter(self.inl[0].fluid.val)):\n balance = (\n r'\\Delta \\dot{m}_\\mathrm{fluid} = ' + in1 +\n '+' + in2 + '-' + out)\n m_fluid = r'\\dot{m}_\\mathrm{fluid} = ' + in1 + '+' + in2\n m_air_stoich = (\n r'\\dot{m}_\\mathrm{' + air + ',stoich}='\n r'\\dot{m}_\\mathrm{' + fuel + r'} \\cdot ' +\n str(round(self.air_min, 4)))\n latex_general_eq = (\n r'\\begin{split}' + '\\n'\n r'&' + balance + r'\\\\' + '\\n'\n r'&' + m_fluid + r'\\\\' + '\\n'\n r'&' + m_air_stoich + r'\\\\' + '\\n'\n r'\\end{split}'\n )\n equations += (\n generate_latex_eq(\n self, latex_general_eq, label + '_general_eq') + '\\n' +\n generate_latex_eq(self, latex, label + '_' + fluid) + '\\n')\n else:\n equations += (\n generate_latex_eq(self, latex, label + '_' + fluid) + '\\n')\n # remove last newline\n return equations[:-1]",
"def reducedFormTwo(self, equation):\n find = re.findall('(.)?(\\d+\\.\\d+|\\d+)(\\+|\\-)(\\d+\\.\\d+|\\d+)(.)?' , equation)\n for token in find:\n tmp = ''.join(map(str,token))\n if tmp[-1] == '*' or tmp[-1] == '^' or tmp[-1] == '/':\n continue\n if tmp[0] == '*' or tmp[0] == '^' or tmp[0] == '/':\n continue\n else:\n try:\n if tmp[0] == '-':\n pass\n if not tmp[-1].isnumeric():\n tmp = tmp[:-1]\n res = eval(tmp)\n if res > 0:\n res = '+' + str(res)\n equation = equation.replace(tmp, res)\n except:\n continue\n return equation",
"def specify_feature_content(arc_layer, label_dict):\n # get the AnnotationProps, that lead to the Labelrenderer and the Symbol\n feature_layer = change_interface(arc_layer, ArcGisModules.module_carto.IGeoFeatureLayer)\n annotation_parent_layer = change_interface(\n feature_layer.AnnotationProperties,\n ArcGisModules.module_carto.IAnnotateLayerPropertiesCollection2\n )\n label_engine = change_interface(\n annotation_parent_layer.Properties(0),\n ArcGisModules.module_carto.ILabelEngineLayerProperties2\n )\n if feature_layer.DisplayFeatureClass.ShapeType == 3:\n label_placement = '2'\n else:\n label_placement = '0'\n label_dict['labelValues']['placement']['placement'] = label_placement\n\n expression = label_engine.Expression\n label_dict['labelValues']['type'] = 'simple'\n label_dict['labelValues']['text-style']['fieldName'] = expression[1:-1]\n\n if annotation_parent_layer.Properties(0).AnnotationMaximumScale > 0.0 \\\n or annotation_parent_layer.Properties(0).AnnotationMinimumScale > 0.0:\n label_dict['labelValues']['rendering']['scaleVisibility'] = '1'\n label_dict['labelValues']['rendering']['scaleMax'] = unicode(\n annotation_parent_layer.Properties(0).AnnotationMinimumScale\n )\n label_dict['labelValues']['rendering']['scaleMin'] = unicode(\n annotation_parent_layer.Properties(0).AnnotationMaximumScale\n )\n\n symbol = label_engine.Symbol\n return symbol",
"def simplify_chunks(self, input_text):\n string = ''\n chunks = re.split(r'([\\w\\.-]+|[\\(\\)\\*\\+])', input_text)\n chunks = [chunk.strip() for chunk in chunks]\n chunks = [chunk for chunk in chunks if chunk != '']\n\n classifiers = [\n 'is_integer', 'is_float', 'is_operator', 'is_constant', 'is_function'\n ]\n\n for chunk in chunks:\n for classifier in classifiers:\n result = getattr(self, classifier)(chunk)\n if result is not False:\n string += str(result) + ' '\n break\n\n # Replace '^' with '**' to evaluate exponents\n string = string.replace('^', '**')\n\n return string",
"def parse_cst(self):\n stack = []\n self.tokenizer.next().must_be('{')\n for token in self.tokenizer:\n stack += [ token ] # Build a stack to process\n if token.text == \".\":\n # We've got a rule to process. Start by determining correct syntax.\n stack[1].must_be(':')\n ## Name analysis\n stack[0].assert_symbol_name()\n production_elements = stack[2:-1]\n for element in production_elements:\n element.assert_symbol_name()\n if stack[0].text in self.GlobalSymbolDict: # Redefined lexical sym or add a new production?\n existingSymbol = self.GlobalSymbolDict[stack[0].text]\n if existingSymbol.is_gla:\n raise Exception(\"Lexical Symbol %s redefined at %d,%d. Originally at %d,%d\" % \\\n (stack[0].text, stack[0].line, stack[0].col, \\\n existingSymbol.defining_token.line, existingSymbol.defining_token.col))\n existingSymbol.productions += [Production(existingSymbol,production_elements)]\n else: # Brand new symbol occurrence\n s = Symbol(stack[0])\n s.is_gla = False\n s.productions = [Production(s,production_elements)]\n self.GlobalSymbolDict[stack[0].text] = s\n stack = []\n elif token.text == \"{\":\n raise Exception(\"Unexpected %s\" % token)\n elif token.text == \"}\":\n if len(stack) > 1: raise Exception(\"Unfinished lexical specification beginning with %s\" % stack[0])\n #pp = pprint.PrettyPrinter()\n #pp.pprint(self.GlobalSymbolDict)\n return\n else: pass",
"def atom_featurizer(atom):\n\n return str(\n (\n atom.GetSymbol(),\n atom.GetNumRadicalElectrons(),\n atom.GetFormalCharge(),\n atom.GetChiralTag(),\n atom.GetIsAromatic(),\n get_ring_size(atom, max_size=6),\n atom.GetDegree(),\n atom.GetTotalNumHs(includeNeighbors=True),\n )\n )",
"def handle_math(strng, orden=0):\n mathexp = ([(re.compile(r'\\^([^{])', re.I), r'<sup>\\1</sup>'), (re.compile(r'\\^{([^{]+)}', re.I), r'<sup>\\1</sup>'), (re.compile(r'_([^{]+){', re.I), r'<sub>\\1</sub>'), (re.compile(r'_{([^{]+)}', re.I), r'<sub>\\1</sub>'), (re.compile(r'\\\\mathrm{([^{]+)}', re.I), r'{\\1}')\n ],\n [(re.compile(r'<sub>([^<]*)</sub>', re.I), r'$_{\\1}$'), (re.compile(r'<sup>([^<]*)</sup>', re.I), r'$^{\\1}$')\n ])\n# mathmarker= ('<math>','</math>')\n mathmarker = ('', '')\n\n if orden == 0:\n p = re.compile(r'\\$([^\\$]+)\\$') # Find math substrngings\n if p.search(strng):\n ini = 0\n linecontent = ''\n iterator = p.finditer(strng)\n for match in iterator:\n strngmath = match.group()[1:-1]\n linecontent += strng[ini:match.start()]\n for i, o in mathexp[orden]:\n strngmath = re.sub(i, o, strngmath)\n linecontent += mathmarker[0] + strngmath + mathmarker[1]\n ini = match.end()\n linecontent += strng[ini:]\n else:\n return strng\n else:\n for i, o in mathexp[orden]:\n strng = i.sub(o, strng)\n linecontent = strng\n\n return linecontent",
"def __init__(self: 'Leaf', symbol: str) -> None:\n RegexTree.__init__(self, symbol, [])",
"def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! (' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'",
"def test_regref_transform(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float alpha = 0.5\\nfloat Delta=sqrt(2)\\nCoherent(alpha*q0, Delta*sqrt(pi), 0.2*10) | 0\\n\"\n )\n\n p = sym.Symbol(\"q0\")\n assert isinstance(bb.operations[0][\"args\"][0], RegRefTransform)\n assert bb.operations[0][\"args\"][0].func_str == str(0.5 * p)"
] |
[
"0.52776694",
"0.49526072",
"0.49122655",
"0.48906052",
"0.4840586",
"0.48198035",
"0.47981805",
"0.47640973",
"0.46877915",
"0.46727678",
"0.46708876",
"0.4634626",
"0.46228975",
"0.46164334",
"0.45988402",
"0.4594093",
"0.45656273",
"0.45655215",
"0.4559517",
"0.45564628",
"0.45507693",
"0.4538898",
"0.45292073",
"0.44968015",
"0.4492525",
"0.4480803",
"0.44807488",
"0.44800487",
"0.4452452",
"0.4451797"
] |
0.59415656
|
0
|
Extracts the FST symbols that compose the complex output label of a rewrite rule. The FST symbols of a complex output label are: the epsilon symbol, if the complex output label is an epsilon symbol (e.g. [''] for the label ''); otherwise, all characters of the complex output label (e.g. ['{', 'l', 'p'] for the label '{lp').
|
def _symbols_of_output(label: str) -> List[str]:
if label == common.EPSILON:
return [label]
# We add a new state transition arc for each character of the output token.
return list(label)
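A quick, hedged sanity check of the behaviour described above (a sketch; it assumes common.EPSILON is the empty-string epsilon label, as the docstring's example suggests):

# Both assertions follow directly from the definition of _symbols_of_output.
assert _symbols_of_output(common.EPSILON) == [common.EPSILON]  # epsilon passes through as-is
assert _symbols_of_output("{lp") == ["{", "l", "p"]            # one arc symbol per character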
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _symbols_of_input(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a state transition arc for each digit of a multi-digit number.\n if \"[\" not in label:\n return list(label)\n\n # We add a state transition arc for each inflectional or derivational\n # morpheme, inflectional group boundary, and proper noun analysis tag.\n return _SYMBOLS_REGEX.findall(label)",
"def label_to_symbol(label: str, all_labels: list) -> str:\n index = all_labels.index(label)\n in_symbol = f\"[i-{index}]\"\n out_symbol = f\"[o-{index}]\"\n return in_symbol, out_symbol",
"def filter_symbols(nm_output):\n lines = nm_output.split('\\n')\n subset = [e for e in lines if (' T ' in e or ' D ' in e)]\n\n for s in subset:\n _, _, symbol = s.split()\n yield symbol",
"def feature(root, suffix):\r\n if suffix == '$':\r\n return ('$', suffix)\r\n return (root[-1], suffix[0])",
"def mathmode(strng):\n mathexp = (\n # forma: a^{b}\n re.compile(r\"([\\^])[{]([^}\\$]+)[}]\"), re.compile(r\"([_])[{]([^}$]+)[}]\"),\n # forma: a^\\beta\n re.compile(r\"([\\^])(\\\\[\\w]+)\"), re.compile(r\"([_])(\\\\[\\w]+)\"),\n # forma: a^b\n re.compile(r\"([\\^])([^\\{\\\\\\$])\"), re.compile(r\"([_])([^\\$\\{\\\\])\")\n )\n for i in mathexp:\n strng = i.sub(r\"$\\1{\\2}$\", strng)\n return strng",
"def test_symbol_repr(self):\n a = pybamm.Symbol(\"a\")\n b = pybamm.Symbol(\"b\")\n c = pybamm.Symbol(\"c\", domain=[\"test\"])\n d = pybamm.Symbol(\"d\", domain=[\"test\"])\n hex_regex = r\"\\-?0x[0-9,a-f]+\"\n self.assertRegex(\n a.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", a, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n b.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", b, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n c.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", c, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n d.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", d, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n (a + b).__repr__(),\n r\"Addition\\(\" + hex_regex + r\", \\+, children\\=\\['a', 'b'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n (c * d).__repr__(),\n r\"Multiplication\\(\"\n + hex_regex\n + r\", \\*, children\\=\\['c', 'd'\\], domain=\\['test'\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(a).__repr__(),\n r\"Gradient\\(\" + hex_regex + \", grad, children\\=\\['a'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(c).__repr__(),\n r\"Gradient\\(\"\n + hex_regex\n + \", grad, children\\=\\['c'\\], domain=\\['test'\\]\\)\",\n )",
"def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! (' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'",
"def create_nfa_from_postfix(regex: str):\n\n nfa_stack = []\n\n for char in regex:\n if char == '.':\n # to concat two nfas, add an epsilon arrow from every accepting state\n # of the first to the start state of the second and turn all accepting states\n # of the first into non accepting states\n\n if len(nfa_stack) < 2:\n raise InvalidRegexException()\n\n nfa2 = nfa_stack.pop()\n nfa1 = nfa_stack.pop()\n\n if nfa2.is_one_character_nfa:\n nfa2_matched_character, nfa2_accept_state = nfa2.start_state.transitions[0]\n for accept_state in nfa1.accept_states:\n accept_state.add_transition(nfa2_matched_character, nfa2_accept_state)\n accept_state.is_accepting = False\n\n else:\n for accept_state in nfa1.accept_states:\n accept_state.add_transition('eps', nfa2.start_state)\n accept_state.is_accepting = False\n\n\n nfa1.accept_states = nfa2.accept_states\n nfa1.is_one_character_nfa = False\n nfa_stack.append(nfa1)\n\n # for garbage collection\n nfa2.start_state = None\n nfa2.accept_states = None\n elif char == '*':\n # to apply a kleene star to an nfa, add a new start state, which is also an accept state,\n # to the nfa with an epsilon arrow going into the original start state.\n # add epsilon arrows from every accept state to the original start state\n\n if len(nfa_stack) < 1:\n raise InvalidRegexException()\n\n nfa = nfa_stack.pop()\n new_start_state = State([('eps', nfa.start_state)], True)\n for accept_state in nfa.accept_states:\n accept_state.add_transition('eps', nfa.start_state)\n\n nfa.accept_states.append(new_start_state)\n nfa.start_state = new_start_state\n nfa.is_one_character_nfa = False\n nfa_stack.append(nfa)\n\n elif char == '+':\n # TODO try this out on paper\n # we add epsilon arrows from every accept state to the start state\n\n if len(nfa_stack) < 1:\n raise InvalidRegexException()\n\n nfa = nfa_stack.pop()\n for accept_state in nfa.accept_states:\n accept_state.add_transition('eps', nfa.start_state)\n\n nfa.is_one_character_nfa = False\n nfa_stack.append(nfa)\n elif char == '|':\n # we apply the union operation by adding a new non accepting start state with\n # epsilon arrows going into the start state of each operand nfa\n\n if len(nfa_stack) < 2:\n raise InvalidRegexException()\n\n nfa2 = nfa_stack.pop()\n nfa1 = nfa_stack.pop()\n\n new_start_state = State([('eps', nfa1.start_state), ('eps', nfa2.start_state)], False)\n\n nfa1.start_state = new_start_state\n nfa1.accept_states.extend(nfa2.accept_states)\n nfa1.is_one_character_nfa = False\n nfa_stack.append(nfa1)\n\n # for garbage collection\n nfa2.start_state = None\n nfa2.accept_states = None\n else:\n # character from the alphabet\n accept_state = State([], True)\n start_state = State([(char, accept_state)], False)\n nfa_stack.append(NFA(start_state, [accept_state], True))\n\n if len(nfa_stack) != 1:\n raise InvalidRegexException()\n\n return nfa_stack[0]",
"def getComplex(self, base, aspirated=False):\n res = ''\n if base == 'c':\n res = self.useRetroflex and 'ʈ͡ʂ' or 't͡ɕ'\n elif base == 'j':\n res = self.useRetroflex and 'ɖ͡ʐ' or 'd͡ʑ'\n elif base == 'ts':\n res = 't͡s'\n else:\n res = 'd͡z'\n if aspirated:\n res += 'ʰ'\n return res",
"def compile_term(self):\n\n\t\tself.outfile.write('<term>\\n')\n\n\t\tcount = 0\n\n\t\twhile(self.tokenizer.get_token() not in [')',']',';',',', '/', '|', '<', '>', '=', '*', '+', '&']):\n\t\t\tif self.tokenizer.get_token().isdigit():\n\t\t\t\tself.outfile.write(self.tokenizer.int_value())\n\t\t\telif '\"' in self.tokenizer.get_token():\n\t\t\t\tself.outfile.write(self.tokenizer.str_value())\n\t\t\telif self.tokenizer.get_token() in ['true', 'false', 'null', 'this']:\n\t\t\t\tself.outfile.write(self.tokenizer.keyword())\n\t\t\telif self.tokenizer.get_token() == '-' and count == 0:\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\t\tself.compile_term()\n\t\t\telif self.tokenizer.get_token() == '-' and count > 0:\n\t\t\t\tbreak\n\t\t\telif self.tokenizer.get_token() == '~':\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\t\tif self.tokenizer.get_token() != '(':\n\t\t\t\t\tself.compile_term()\n\n\t\t\t\telse:\n\t\t\t\t\tself.outfile.write('<term>\\n' + self.tokenizer.symbol())\n\t\t\t\t\tself.compile_expression()\n\t\t\t\t\txml = self.tokenizer.symbol() + '</term>\\n'\n\t\t\t\t\tself.outfile.write(xml)\n\n\t\t\telif self.tokenizer.get_token() == '(':\n\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\t\tself.compile_expression()\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\telif self.tokenizer.get_token() == '[':\n\t\t\t\txml = self.tokenizer.symbol()\n\t\t\t\tself.outfile.write(xml)\n\n\t\t\t\tself.compile_expression()\n\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\telif self.tokenizer.get_token() == '.':\n\t\t\t\txml = self.tokenizer.symbol() + self.tokenizer.identifier() + self.tokenizer.symbol() + '<expressionList>\\n'\n\t\t\t\tself.outfile.write(xml)\n\n\t\t\t\tif self.tokenizer.get_token() != ')':\n\t\t\t\t\tself.compile_expression_list()\n\n\t\t\t\tself.outfile.write('</expressionList>\\n' + self.tokenizer.symbol())\n\t\t\n\t\t\telse:\n\t\t\t\tself.outfile.write(self.tokenizer.identifier())\n\n\t\t\tcount = count + 1\n\n\t\tself.outfile.write('</term>\\n')\n\n\t\tif self.tokenizer.get_token() in self.tokenizer._operands:\n\t\t\tif self.tokenizer.get_token() in ['<', '>', '\"', '&']:\n\t\t\t\txml = '<symbol> ' + CompilationEngine._operands.get(self.tokenizer.get_token()) + ' </symbol>\\n'\n\t\t\t\tself.tokenizer.advance()\n\t\t\telse:\n\t\t\t\txml = self.tokenizer.symbol()\n\n\t\t\tself.outfile.write(xml)\n\t\t\tself.compile_term()",
"def handle_math(strng, orden=0):\n mathexp = ([(re.compile(r'\\^([^{])', re.I), r'<sup>\\1</sup>'), (re.compile(r'\\^{([^{]+)}', re.I), r'<sup>\\1</sup>'), (re.compile(r'_([^{]+){', re.I), r'<sub>\\1</sub>'), (re.compile(r'_{([^{]+)}', re.I), r'<sub>\\1</sub>'), (re.compile(r'\\\\mathrm{([^{]+)}', re.I), r'{\\1}')\n ],\n [(re.compile(r'<sub>([^<]*)</sub>', re.I), r'$_{\\1}$'), (re.compile(r'<sup>([^<]*)</sup>', re.I), r'$^{\\1}$')\n ])\n# mathmarker= ('<math>','</math>')\n mathmarker = ('', '')\n\n if orden == 0:\n p = re.compile(r'\\$([^\\$]+)\\$') # Find math substrngings\n if p.search(strng):\n ini = 0\n linecontent = ''\n iterator = p.finditer(strng)\n for match in iterator:\n strngmath = match.group()[1:-1]\n linecontent += strng[ini:match.start()]\n for i, o in mathexp[orden]:\n strngmath = re.sub(i, o, strngmath)\n linecontent += mathmarker[0] + strngmath + mathmarker[1]\n ini = match.end()\n linecontent += strng[ini:]\n else:\n return strng\n else:\n for i, o in mathexp[orden]:\n strng = i.sub(o, strng)\n linecontent = strng\n\n return linecontent",
"def _symbols_table_file_content(\n rule_set: _RewriteRuleSet) -> Generator[str, None, None]:\n\n def _line(symbol: str, index: int) -> str:\n return f\"{symbol}\\t{index}\\n\"\n\n fst_symbols = []\n\n for rule in rule_set.rule:\n fst_symbols.extend(_symbols_of_input(rule.input))\n fst_symbols.extend(_symbols_of_output(rule.output))\n\n unique_symbols = set(fst_symbols).difference({common.EPSILON})\n complex_symbols = [s for s in unique_symbols if len(s) > 1]\n\n index = 983040 # start of the Unicode private use area.\n\n for symbol in sorted(complex_symbols):\n yield _line(symbol, index)\n index += 1\n\n logging.info(\"generated complex symbols file content\")",
"def format_symbol(item):\n prefix = item.get(\"containerName\", \"\")\n label = prefix + \".\" + item.get(\"name\") if prefix else item.get(\"name\")\n return [label, format_symbol_kind(item.get(\"kind\"))]",
"def _getFrac(expr):\n expr=expr.replace(' ', '')\n l = len(expr)\n frac = []; start = 0; par = 0\n pack=''; num=''\n op = ['+','-']\n operator = ['+','-','/','*']\n sym = ['x','y']\n multFrac = False\n\n for i in range(0,l):\n if expr[i]=='(' : #(\n if par==0 : start=i\n par += 1\n elif expr[i] == ')' : #)\n par -= 1\n if par==0 :\n pack += expr[start:i+1]; start = i+1\n elif expr[i]=='*'and par==0: #*\n pack += expr[start:i]; start = i+1\n if num!='' :\n frac.append((num,pack))\n frac.append(expr[i])\n pack = ''; num = ''\n else :\n pack += expr[i]\n elif expr[i]=='/'and par==0: #/\n pack += expr[start:i]\n num += pack\n pack = ''\n start = i+1\n elif expr[i] in op and par==0 and num != '': #+-\n pack += expr[start:i]\n frac.append((num,pack))\n frac.append(expr[i])\n pack = ''; num = ''; start = i+1\n elif expr[i] in op and par==0:\n pack += expr[start:i]\n frac.append((pack,''))\n frac.append(expr[i])\n pack = ''; num = ''; start = i+1\n\n if start < l : pack += expr[start:l]\n if num != '' :\n frac.append((num,pack))\n else:\n frac.append((pack,''))\n\n frac2 = [frac[0]]\n i=1\n while i<len(frac):\n if frac[i] in operator and frac[i]!='*' :\n frac2.append(frac[i])\n frac2.append(frac[i+1])\n elif frac[i]=='*' :\n (a1,b1)=frac[i-1]\n (a2,b2)=frac[i+1]\n frac2[len(frac2)-1]=(a1+'*'+a2,b1+'*'+b2)\n i+=2\n return frac2",
"def remove_epsilons(string, epsilon='@_EPSILON_SYMBOL_@'):\n return string.replace('@_EPSILON_SYMBOL_@', '')",
"def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )",
"def diracNotation(self):\n diracNotation=\"\"\n for i in range(len(self.reversedStatevector)):\n if self.reversedStatevector[i]==0:\n continue\n diracNotation+=self.numberFormat(self.reversedStatevector[i].real)\n diracNotation+=self.numberFormat(self.reversedStatevector[i].imag,True)\n #the next line generates the state .. ex circuit with 3 wires -> i=2 => state:010\n diracNotation+=\"|\"+str((\"{0:0\"+str(self.num_qubits).replace('.0000','')+\"b}\").format(i))+\"⟩ \" \n return diracNotation.lstrip(\"+\")",
"def decode_fn(s_in):\r\n s_out = []\r\n for w in s_in:\r\n if w == '<s>':\r\n continue\r\n elif w=='</s>':\r\n break\r\n s_out.append(w)\r\n s_out = ' '.join(s_out)\r\n return s_out",
"def reducedFormTwo(self, equation):\n find = re.findall('(.)?(\\d+\\.\\d+|\\d+)(\\+|\\-)(\\d+\\.\\d+|\\d+)(.)?' , equation)\n for token in find:\n tmp = ''.join(map(str,token))\n if tmp[-1] == '*' or tmp[-1] == '^' or tmp[-1] == '/':\n continue\n if tmp[0] == '*' or tmp[0] == '^' or tmp[0] == '/':\n continue\n else:\n try:\n if tmp[0] == '-':\n pass\n if not tmp[-1].isnumeric():\n tmp = tmp[:-1]\n res = eval(tmp)\n if res > 0:\n res = '+' + str(res)\n equation = equation.replace(tmp, res)\n except:\n continue\n return equation",
"def stoichiometry_func_doc(self, label):\n air = self.air_alias.val\n fuel = self.fuel_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n\n equations = ''\n for fluid in self.inl[0].fluid.val.keys():\n\n in1 = r'\\dot{m}_\\mathrm{in,1} \\cdot x_\\mathrm{fluid,in,1}'\n in2 = r'\\dot{m}_\\mathrm{in,2} \\cdot x_\\mathrm{fluid,in,2}'\n out = r'\\dot{m}_\\mathrm{out,1} \\cdot x_\\mathrm{fluid,out,1}'\n\n if fluid == air:\n latex = (\n r'0=\\Delta\\dot{m}_\\mathrm{' + fluid + r'} - '\n r'\\dot{m}_\\mathrm{' + fluid + r',stoich}'\n )\n elif fluid == fuel:\n latex = (\n r'0=\\Delta \\dot{m}_\\mathrm{' + fluid + r'} - '\n r'\\dot{m}_\\mathrm{' + fluid + r'}'\n )\n elif fluid == flue_gas:\n latex = (\n r'0=\\Delta \\dot{m}_\\mathrm{' + fluid.replace('_', ',') +\n r'} + \\dot{m}_\\mathrm{' + fuel + r'} +'\n r'\\dot{m}_\\mathrm{' + air + r',stoich}'\n )\n else:\n latex = r'0 = \\Delta \\dot{m}_\\mathrm{' + fluid + '}'\n\n if fluid == next(iter(self.inl[0].fluid.val)):\n balance = (\n r'\\Delta \\dot{m}_\\mathrm{fluid} = ' + in1 +\n '+' + in2 + '-' + out)\n m_fluid = r'\\dot{m}_\\mathrm{fluid} = ' + in1 + '+' + in2\n m_air_stoich = (\n r'\\dot{m}_\\mathrm{' + air + ',stoich}='\n r'\\dot{m}_\\mathrm{' + fuel + r'} \\cdot ' +\n str(round(self.air_min, 4)))\n latex_general_eq = (\n r'\\begin{split}' + '\\n'\n r'&' + balance + r'\\\\' + '\\n'\n r'&' + m_fluid + r'\\\\' + '\\n'\n r'&' + m_air_stoich + r'\\\\' + '\\n'\n r'\\end{split}'\n )\n equations += (\n generate_latex_eq(\n self, latex_general_eq, label + '_general_eq') + '\\n' +\n generate_latex_eq(self, latex, label + '_' + fluid) + '\\n')\n else:\n equations += (\n generate_latex_eq(self, latex, label + '_' + fluid) + '\\n')\n # remove last newline\n return equations[:-1]",
"def fmtd_str(self,c=False,prefix_symbol=\"\"):\n psym = prefix_symbol\n ms1 = (\n f\"{self.filename_prefix_mono}{self.event_kind:9} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lms1 = len(ms1)+len(\"(\")\n join_mstr = f\",\\n{' '*lms1}\"\n mavs = (\n f\"{self.argvars}\"\n )\n ms = f\"{psym}{ms1}{mavs}\"\n if c:\n aac = argvars_argname_color = \"MAGENTA\"\n ps1 = (\n f\"{self.filename_prefix_poly}{self.color.fore(f'{self.event_kind:9}','KIND')} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lps1 = lms1\n join_pstr = f\",\\n{' '*lps1}\"\n pavs = (\n f\"{self.argvars}\"\n )\n ps = f\"{psym}{ps1}{pavs}\"\n return ps\n return ms",
"def to_node(value: str) -> Node:\n if not value:\n res = Empty()\n elif value in CONCATENATION_SYMBOLS:\n res = Concatenation()\n elif value in UNION_SYMBOLS:\n res = Union()\n elif value in KLEENE_STAR_SYMBOLS:\n res = KleeneStar()\n elif value in EPSILON_SYMBOLS:\n res = Epsilon()\n elif value[0] == \"\\\\\":\n res = Symbol(value[1:])\n else:\n res = Symbol(value)\n return res",
"def __str__(self):\n unarybrackets = ['sq', 'sqrt']\n #unary operators which require brackets around their operand\n #if the operand is a leaf, we force the brackets; otherwise the operand\n #is a non-leaf expression and will create its own brackets\n outstr = ''\n if self.is_leaf():\n outstr = outstr + str(self._element)\n else:\n if self._parent and self._element not in unarybrackets:\n outstr = '('\n #unary minus is unary, but needs brackets outside the minus\n if self._leftchild:\n outstr = outstr + str(self._leftchild)\n outstr = outstr + str(self._element)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + '('\n outstr = outstr + str(self._rightchild)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + ')'\n if self._parent and self._element not in unarybrackets:\n outstr = outstr + ')'\n return outstr",
"def nn_to_rpn(self, nn):\n expression = []\n ops = []\n\n # handle +-*/) to add a space before and after the operator\n nn = nn.strip()\n nn = re.sub(r\"(?P<operator>[+\\-*/])\", add_spaces_operator, nn)\n # handle the wrongly replaced \" * * \"(maybe many spaces around *) to \"**\"\n nn = re.sub(r\" *\\* {2}\\* *\", \"**\", nn)\n nn = re.sub(r\"(?P<operator>[(])\", add_spaces_left_bracket, nn)\n nn = re.sub(r\"(?P<operator>[)])\", add_spaces_right_bracket, nn)\n items = re.split(r\"\\s+\", nn)\n for item in items:\n if item in [\"+\", \"-\", \"*\", \"/\"]:\n while len(ops) >= 0:\n if len(ops) == 0:\n ops.append(item)\n break\n op = ops.pop()\n if op == \"(\" or self.ops_rule[item] > self.ops_rule[op]:\n ops.append(op)\n ops.append(item)\n break\n else:\n expression.append(op)\n elif item == \"(\":\n ops.append(item)\n elif item == \")\":\n while len(ops) > 0:\n op = ops.pop()\n if op == \"(\":\n break\n else:\n expression.append(op)\n else:\n expression.append(item)\n\n while len(ops) > 0:\n expression.append(ops.pop())\n\n return expression",
"def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label",
"def get_labels(orthographic: str):\n labels = []\n tmp = ''\n tag = False\n\n # Get all labels from orthographic form\n for char in orthographic:\n if char == '[':\n tag = True\n elif char == ']':\n labels.append(tmp)\n tag = False\n tmp = ''\n elif tag:\n tmp += char\n return labels",
"def oversimplify(strng):\n s = strng.encode('latex').decode('utf-8')\n s = reg_simplify.sub('', s)\n return s",
"def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals",
"def __str__(self):\n termStrings = []\n for term in self.LHS:\n coefficient = term[0]\n unknownSet = term[1]\n\n termString = str(coefficient) + ' * '\n unknownStrings = []\n for unknown in unknownSet:\n unknownString = unknown[0].__class__.__name__ + '@' + str(id(unknown[0]))[-4:] + '.' + unknown[1] # last 4 digits of variable ID . attribute name\n unknownStrings.append(unknownString)\n termString += str.join(' * ', unknownStrings)\n termStrings.append(termString)\n\n termStrings = str.join(' + ', termStrings)\n return termStrings + ' = ' + str(self.RHS)"
] |
[
"0.5881027",
"0.53526294",
"0.53450996",
"0.5294573",
"0.50718004",
"0.50265986",
"0.50232095",
"0.5006316",
"0.49348602",
"0.4923187",
"0.4909728",
"0.48874924",
"0.4883918",
"0.48639122",
"0.4845914",
"0.48448756",
"0.4844453",
"0.48414075",
"0.4839914",
"0.4830731",
"0.48128864",
"0.48080072",
"0.48059177",
"0.47907978",
"0.4761895",
"0.4755481",
"0.474311",
"0.47429273",
"0.473852",
"0.4729663"
] |
0.6123915
|
0
|
r"""Generates the content of the complex symbols table file. Generated file is in AT&T format. It defines the labels for state transition arcs and assigns a unique index to each. The first label in the file get the index 983040 (decimal value for the beginning of the Unicode private use area). Successive labels have incremental index. Note that we do not generate distinct symbols table files for complex input and output labels, yet only create a single symbols table file that contains the union of the set of labels on both sides.
|
def _symbols_table_file_content(
rule_set: _RewriteRuleSet) -> Generator[str, None, None]:
def _line(symbol: str, index: int) -> str:
return f"{symbol}\t{index}\n"
fst_symbols = []
for rule in rule_set.rule:
fst_symbols.extend(_symbols_of_input(rule.input))
fst_symbols.extend(_symbols_of_output(rule.output))
unique_symbols = set(fst_symbols).difference({common.EPSILON})
complex_symbols = [s for s in unique_symbols if len(s) > 1]
index = 983040 # start of the Unicode private use area.
for symbol in sorted(complex_symbols):
yield _line(symbol, index)
index += 1
logging.info("generated complex symbols file content")
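For illustration, a minimal sketch of the generated content. The rule below is hypothetical, SimpleNamespace is only a stand-in for the real _RewriteRuleSet message, and it assumes _SYMBOLS_REGEX captures a bracketed analysis tag such as "[Gen]" as a single multi-character input symbol:

from types import SimpleNamespace  # stand-in for the _RewriteRuleSet protobuf (assumption)

fake_rule_set = SimpleNamespace(rule=[SimpleNamespace(input="[Gen]", output="ta")])
# The output symbols 't' and 'a' are single characters, so the only complex symbol is "[Gen]";
# under the regex assumption above, the generator yields one AT&T symbols-table line: "[Gen]\t983040\n".
print("".join(_symbols_table_file_content(fake_rule_set)))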
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save(self):\n # First, just allocate enough memory for the SDAT header.\n data = bytearray(0x40)\n\n # -------------------\n # Make the SYMB block\n\n symbolsStringTable = bytearray()\n def addSymbolAndGetOffset(symbol):\n if symbol is None:\n return -1\n offset = len(symbolsStringTable)\n symbolsStringTable.extend(symbol.encode('latin-1') + b'\\0')\n return offset\n\n symbolsHeaderOffsets = []\n\n # Parallel arrays, here.\n symbolsTableValues = []\n shouldIncrementByTableLen = []\n\n anySymbolsInWholeFile = False\n\n def addSymbolsFrom(namedList, nested=False):\n\n # First, figure out if any actual symbols exist\n anyActualSymbols = False\n anyActualSubsymbols = False\n if not nested:\n for symbol, _ in namedList:\n if symbol is not None:\n anyActualSymbols = True\n break\n else:\n for symbol, entry in namedList:\n if symbol is not None:\n anyActualSymbols = True\n break\n for subSymbol, subEntry in entry.sequences:\n if subSymbol is not None:\n anyActualSubsymbols = True\n break\n\n\n nonlocal anySymbolsInWholeFile\n anySymbolsInWholeFile |= anyActualSymbols\n anySymbolsInWholeFile |= anyActualSubsymbols\n\n # If there *are* any symbols, keep going\n symbolsHeaderOffsets.append(len(symbolsTableValues) * 4)\n\n if not nested:\n symbolsTableValues.append(len(namedList))\n shouldIncrementByTableLen.append(False)\n\n for symbol, _ in namedList:\n symbolsTableValues.append(addSymbolAndGetOffset(symbol))\n shouldIncrementByTableLen.append(True)\n\n else:\n mainList, subListsArea = [], []\n mainListSIBTL, subListsAreaSIBTL = [], []\n\n mainList.append(len(namedList))\n mainListSIBTL.append(False)\n\n mainListFullLength = (1 + 2 * len(namedList)) * 4\n subListsAreaOffset = (0x40\n + len(symbolsTableValues) * 4\n + mainListFullLength)\n\n for symbol, entry in namedList:\n\n mainList.append(addSymbolAndGetOffset(symbol))\n mainListSIBTL.append(True)\n\n subListOffset = subListsAreaOffset + len(subListsArea) * 4\n\n if entry is None:\n subNames = []\n else:\n subNames = [n for (n, s) in entry.sequences]\n\n if entry or subNames:\n subListsArea.append(len(subNames))\n subListsAreaSIBTL.append(False)\n\n for subSymbol in subNames:\n subListsArea.append(addSymbolAndGetOffset(subSymbol))\n subListsAreaSIBTL.append(True)\n\n mainList.append(subListOffset)\n mainListSIBTL.append(False)\n\n else:\n mainList.append(0)\n mainListSIBTL.append(False)\n\n symbolsTableValues.extend(mainList)\n symbolsTableValues.extend(subListsArea)\n shouldIncrementByTableLen.extend(mainListSIBTL)\n shouldIncrementByTableLen.extend(subListsAreaSIBTL)\n\n addSymbolsFrom(self.sequences)\n addSymbolsFrom(self.sequenceArchives, True)\n addSymbolsFrom(self.banks)\n addSymbolsFrom(self.waveArchives)\n addSymbolsFrom(self.sequencePlayers)\n addSymbolsFrom(self.groups)\n addSymbolsFrom(self.streamPlayers)\n addSymbolsFrom(self.streams)\n\n # Only add the SYMB block if there are any symbols\n if anySymbolsInWholeFile:\n symbolsBlockOffset = len(data)\n\n symbolsTableLen = len(symbolsTableValues) * 4\n symbolsTable = bytearray()\n for value, shouldIncrement in itertools.zip_longest(symbolsTableValues,\n shouldIncrementByTableLen):\n if value == -1:\n symbolsTable.extend(b'\\0\\0\\0\\0')\n else:\n if shouldIncrement:\n value += symbolsTableLen + 0x40\n symbolsTable.extend(struct.pack('<I', value))\n\n symbolsBlockSize = 0x40 + len(symbolsTable) + len(symbolsStringTable)\n paddedSymbSize = symbolsBlockSize\n while paddedSymbSize % 4:\n paddedSymbSize += 1\n if self.padSymbSizeTo4InSDATHeader:\n symbolsBlockSize = paddedSymbSize\n\n 
symbolsHeaderOffsetsTable = bytearray()\n for value in symbolsHeaderOffsets:\n if value is None:\n symbolsHeaderOffsetsTable.extend(b'\\0\\0\\0\\0')\n else:\n symbolsHeaderOffsetsTable.extend(struct.pack('<I', value + 0x40))\n\n symbolsHeader = struct.pack('<4sI',\n b'SYMB', paddedSymbSize)\n\n data.extend(symbolsHeader)\n data.extend(symbolsHeaderOffsetsTable)\n data.extend(b'\\0' * 0x18)\n data.extend(symbolsTable)\n data.extend(symbolsStringTable)\n\n else:\n symbolsBlockOffset = None\n symbolsBlockSize = None\n\n\n # -------------------\n # Make the INFO block\n while len(data) % 4: data.append(0)\n infoBlockOffset = len(data)\n\n # Add room to add the header later\n data.extend(b'\\0' * (8 + 8 * 4))\n\n # Pad to 0x20 relative to the INFO block, for some reason\n while (len(data) - infoBlockOffset) % 0x20: data.append(0)\n\n # Helper functions\n def info_declarePart(partNumber):\n struct.pack_into('<I', data, infoBlockOffset + 8 + 4 * partNumber,\n len(data) - infoBlockOffset)\n def addFileAndGetID(file, dataMergeOptimizationID):\n idx = _common.listFind(files, file)\n\n while idx != -1:\n if dataMergeOptimizationID == fileMergeIDs[idx]:\n return idx\n idx = _common.listFind(files, file, idx + 1)\n\n files.append(file)\n fileMergeIDs.append(dataMergeOptimizationID)\n return len(files) - 1\n\n # We encode sections out of order, so that the files will be in\n # the same order as in retail SDATs.\n fileMergeIDs = []\n files = []\n\n # Info part 0: SSEQ\n info_declarePart(0)\n\n data.extend(struct.pack('<I', len(self.sequences)))\n sseqOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.sequences)))\n\n for i, (_, sseq) in enumerate(self.sequences):\n if sseq is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n (file, unk02, bankID, volume, channelPressure,\n polyphonicPressure, playerID) = sseq.save()\n fileID = addFileAndGetID(file, sseq.dataMergeOptimizationID)\n\n data.extend(struct.pack('<3H4Bxx',\n fileID, unk02, bankID, volume, channelPressure,\n polyphonicPressure, playerID))\n\n struct.pack_into('<I', data, sseqOffsetsTableOffset + 4 * i, entryOff)\n\n # Info part 1: SSAR\n info_declarePart(1)\n\n data.extend(struct.pack('<I', len(self.sequenceArchives)))\n ssarOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.sequenceArchives)))\n\n for i, (_, ssar) in enumerate(self.sequenceArchives):\n if ssar is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02, _ = ssar.save()\n fileID = addFileAndGetID(file, ssar.dataMergeOptimizationID)\n\n data.extend(struct.pack('<HH',\n fileID, unk02))\n\n struct.pack_into('<I', data, ssarOffsetsTableOffset + 4 * i, entryOff)\n\n # Info part 2: SBNK\n info_declarePart(2)\n\n data.extend(struct.pack('<I', len(self.banks)))\n sbnkOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.banks)))\n\n for i, (sbnkName, sbnk) in enumerate(self.banks):\n if sbnk is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02, waveArchives = sbnk.save()\n fileID = addFileAndGetID(file, sbnk.dataMergeOptimizationID)\n\n swarIDs = []\n for s in waveArchives:\n swarIDs.append(-1 if s is None else s)\n while len(swarIDs) < 4:\n swarIDs.append(-1)\n\n if len(swarIDs) > 4:\n raise ValueError(f'SBNK {i} (\"{sbnkName}\") uses '\n f'{len(swarIDs)} SWARs. 
The maximum is 4.')\n\n data.extend(struct.pack('<HH4h',\n fileID, unk02, *swarIDs))\n\n struct.pack_into('<I', data, sbnkOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 3: SWAR\n info_declarePart(3)\n\n data.extend(struct.pack('<I', len(self.waveArchives)))\n swarOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.waveArchives)))\n\n for i, (_, swar) in enumerate(self.waveArchives):\n if swar is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02 = swar.save()\n fileID = addFileAndGetID(file, swar.dataMergeOptimizationID)\n\n data.extend(struct.pack('<HH',\n fileID, unk02))\n\n struct.pack_into('<I', data, swarOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 4: Sequence players\n info_declarePart(4)\n\n data.extend(struct.pack('<I', len(self.sequencePlayers)))\n spOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.sequencePlayers)))\n\n for i, (_, sp) in enumerate(self.sequencePlayers):\n if sp is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n maxSequences, channels, heapSize = sp.save()\n\n channelMask = 0\n for j in range(16):\n if j in channels:\n channelMask |= 1 << j\n\n data.extend(struct.pack('<HHI',\n maxSequences, channelMask, heapSize))\n\n struct.pack_into('<I', data, spOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 5: Groups\n info_declarePart(5)\n\n data.extend(struct.pack('<I', len(self.groups)))\n groupOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.groups)))\n\n for i, (_, group) in enumerate(self.groups):\n if group is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n data.extend(struct.pack('<I', len(group)))\n\n for gEntry in group:\n data.extend(struct.pack('<BHxI', *gEntry.save()))\n\n struct.pack_into('<I', data, groupOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 6: Stream players\n info_declarePart(6)\n\n data.extend(struct.pack('<I', len(self.streamPlayers)))\n spOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.streamPlayers)))\n\n for i, (_, sp) in enumerate(self.streamPlayers):\n if sp is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n channels = sp.save()\n chanCount = len(channels)\n while len(channels) < 16:\n channels.append(0xFF)\n\n data.append(chanCount)\n data.extend(channels)\n\n # This has to occur in order for the padding to work out\n # correctly. Weird, but, what can you do. 
Might even be\n # an unknown value.\n data.extend(b'\\0\\0\\0\\0')\n\n struct.pack_into('<I', data, spOffsetsTableOffset + 4 * i, entryOff)\n\n while len(data) % 4: data.append(0)\n\n\n # Info part 7: Streams\n info_declarePart(7)\n\n data.extend(struct.pack('<I', len(self.streams)))\n strmOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.streams)))\n\n for i, (_, strm) in enumerate(self.streams):\n if strm is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02, volume, priority, playerID, unk07 = strm.save()\n fileID = addFileAndGetID(file, strm.dataMergeOptimizationID)\n\n data.extend(struct.pack('<HH4B4x',\n fileID, unk02, volume, priority, playerID, unk07))\n\n struct.pack_into('<I', data, strmOffsetsTableOffset + 4 * i, entryOff)\n\n # Now we can finally fill the header in.\n struct.pack_into('<4sI', data, infoBlockOffset,\n b'INFO', len(data) - infoBlockOffset)\n\n infoBlockSize = len(data) - infoBlockOffset\n\n\n # ----------------------\n # Make a dummy FAT block, to be filled in when adding to the\n # FILE block\n\n while len(data) % 4: data.append(0)\n fatBlockOffset = len(data)\n fatBlockSize = 0xC + 0x10 * len(files)\n fatTableOffset = fatBlockOffset + 0xC\n\n fatHeader = struct.pack('<4sII',\n b'FAT ', 0xC + 0x10 * len(files), len(files))\n\n data.extend(fatHeader)\n data.extend(b'\\0' * (0x10 * len(files)))\n\n\n # -------------------\n # Make the FILE block and fill in the FAT block\n while len(data) % 4: data.append(0)\n fileBlockOffset = len(data)\n\n # Dummy header (to be filled in after we know the total size)\n data.extend(b'\\0' * 0xC)\n\n # Some games align the first file differently\n if self.firstFileAlignment is not None:\n while len(data) % self.firstFileAlignment:\n data.append(0)\n\n # Add each file\n for i, file in enumerate(files):\n\n # Files must be aligned to 0x20 relative to the SDAT\n # itself... usually. Some games align to other amounts.\n while len(data) % self.fileAlignment:\n data.append(0)\n\n # Actually add the file\n fileOffset = len(data)\n data.extend(file)\n \n # Add the appropriate FAT entry\n fLen = len(file)\n if self.fatLengthsIncludePadding:\n while fLen % self.fileAlignment: fLen += 1\n\n struct.pack_into('<II', data, fatTableOffset + 0x10 * i,\n fileOffset, fLen)\n\n # And one last pad for good measure. (And because retail files\n # do so.)\n if self.padAtEnd:\n while len(data) % self.fileAlignment:\n data.append(0)\n\n # Add the header\n struct.pack_into('<4sII', data, fileBlockOffset,\n b'FILE', len(data) - fileBlockOffset, len(files))\n\n fileBlockSize = len(data) - fileBlockOffset\n\n\n # -----------------------\n # Put the blocks together\n\n # Write the SDAT header\n struct.pack_into('<8I', data, 0x10,\n 0 if symbolsBlockOffset is None else symbolsBlockOffset,\n 0 if symbolsBlockSize is None else symbolsBlockSize,\n 0 if infoBlockOffset is None else infoBlockOffset,\n 0 if infoBlockSize is None else infoBlockSize,\n 0 if fatBlockOffset is None else fatBlockOffset,\n 0 if fatBlockSize is None else fatBlockSize,\n 0 if fileBlockOffset is None else fileBlockOffset,\n 0 if fileBlockSize is None else fileBlockSize)\n\n # Write the standard header to the beginning\n _common.NDS_STD_FILE_HEADER.pack_into(data, 0,\n b'SDAT', 0xFEFF, 0x100, len(data), 0x40,\n 3 if symbolsBlockOffset is None else 4)\n\n return data",
"def create_display_data_table():\n\n for ccd in range(0, 10):\n for node in range(0, 4):\n file = 'ccd' + str(ccd) + '_' + str(node)\n infile = data_dir + file\n outfile = web_dir + 'Data/' + file\n\n f = open(infile, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n fo = open(outfile, 'w')\n#\n#--- adding heading\n#\n line = \"#\\n#Date Mn K alpha Al K alpha Ti K alpha Slope Sigma Int Sigma\\n#\\n\"\n fo.write(line)\n for ent in data:\n atemp = re.split('\\s+', ent)\n stime = int(atemp[0])\n#\n#--- converting the date into <mon> <year> form (e.g. May 2013)\n#\n ltime = tcnv.axTimeMTA(stime)\n btemp = re.split(':', ltime)\n year = btemp[0]\n [mon, mdate] = tcnv.changeYdateToMonDate(int(year), int(btemp[1]))\n lmon = tcnv.changeMonthFormat(mon)\n line = lmon + ' ' + year \n for j in range(1, len(atemp)):\n line = line + '\\t' + atemp[j]\n\n line = line + '\\n'\n fo.write(line)\n fo.close()",
"def generate_latex_table(dictionary,filename,location=\".\"):\n if type(filename) != str:\n raise TypeError('filename should be string')\n if type(dictionary) != dict:\n raise TypeError('dictionary should be dictionary')\n\n head_code = \"\"\"\\\\documentclass{article}\n%In the preamble section include the arabtex and utf8 packages\n\\\\usepackage{arabtex}\n\\\\usepackage{utf8}\n\\\\usepackage{longtable}\n\\\\usepackage{color, colortbl}\n\\\\usepackage{supertabular}\n\\\\usepackage{multicol}\n\\\\usepackage{geometry}\n\\\\geometry{left=.1in, right=.1in, top=.1in, bottom=.1in}\n\n\\\\begin{document}\n\\\\begin{multicols}{6}\n\\\\setcode{utf8}\n\n\\\\begin{center}\"\"\"\n\n tail_code = \"\"\"\\\\end{center}\n\\\\end{multicols}\n\\\\end{document}\"\"\"\n\n begin_table = \"\"\"\\\\begin{tabular}{ P{2cm} P{1cm}}\n\\\\textbf{words} & \\\\textbf{\\\\#} \\\\\\\\\n\\\\hline\n\\\\\\\\[0.01cm]\"\"\"\n end_table= \"\"\"\\\\end{tabular}\"\"\"\n rows_num = 40\n if location != '.':\n filename = location +\"/\"+ filename\n\n try:\n file = open(filename+'.tex', 'w', encoding='utf8')\n file.write(head_code+'\\n')\n n= int(len(dictionary)/rows_num)\n words = [(\"\\\\<\"+word+\"> & \"+str(frequancy)+' \\\\\\\\ \\n') for word, frequancy in dictionary.items()]\n start=0\n end=rows_num\n new_words = []\n for i in range(n):\n new_words = new_words+ [begin_table+'\\n'] +words[start:end] +[end_table+\" \\n\"]\n start=end\n end+=rows_num\n remain_words = len(dictionary) - rows_num*n\n if remain_words > 0:\n new_words += [begin_table+\" \\n\"]+ words[-1*remain_words:]+[end_table+\" \\n\"]\n for word in new_words:\n file.write(word)\n file.write(tail_code)\n file.close()\n return True\n except:\n return False",
"def export(self, filename):\r\n size = 10\r\n \r\n initialTime = time.clock()\r\n \r\n file = open(filename, 'w')\r\n# file.write(\"#Atoms\\n#Number X Y Z\\n\")\r\n# for i in range(size):\r\n# for j in range(size):\r\n# for k in range(size):\r\n# for atom in self.getCutoffCell().getAllAtoms():\r\n# num = atom.getIndexNumber()\r\n# pos = atom.getPosition()\r\n# x = pos[0] + (i * self.getCutoffCell().getNa())\r\n# y = pos[1] + (j * self.getCutoffCell().getNa())\r\n# z = pos[2] + (k * self.getCutoffCell().getNa())\r\n# line = str(num) + \" \" + str(x) + \" \" + str(y) + \" \" + str(z)\r\n# file.write(line + \"\\n\")\r\n #Right now this will be extremely slow (8 nested for loops) \r\n class SimpleBond():\r\n def __init__(self, pos1, pos2, jMatrix):\r\n self.pos1 = pos1\r\n self.pos2 = pos2\r\n self.jMatrix = jMatrix\r\n \r\n def sameBond(self, bond2):\r\n if self.pos1 == bond2.pos1 or self.pos1 == bond2.pos2:\r\n if self.pos2 == bond2.pos2 or self.pos2 == bond2.pos1:\r\n return True\r\n return False\r\n \r\n class SimpleBondList():\r\n def __init__(self):\r\n self.list = []\r\n \r\n def addBond(self, bond):\r\n if not self.containsBond(bond):\r\n self.list.append(bond)\r\n \r\n def containsBond(self, bond):\r\n for eachBond in self.list:\r\n if eachBond.sameBond(bond):\r\n return True\r\n return False\r\n \r\n Na = self.getCutoffCell().getNa()\r\n Nb = self.getCutoffCell().getNb()\r\n Nc = self.getCutoffCell().getNc()\r\n \r\n \r\n def contains(list, element):\r\n for item in list:\r\n if (item == element).all():\r\n return True\r\n return False\r\n \r\n def indexOf(list, item):\r\n for i in range(len(list)):\r\n if item.all() == list[i].all():\r\n return i\r\n return -1\r\n \r\n \r\n matrices = []\r\n for bond in self.getCutoffCell().getBonds():\r\n# pos1 = bond.getAtom1().getPosition()\r\n# pos2 = bond.getAtom2().getPosition()\r\n jMat = bond.getJMatrix()\r\n# count = matrices.count(jMat)\r\n if not contains(matrices, jMat):\r\n matrices.append(jMat)\r\n \r\n \r\n simpleCellBonds = []\r\n for bond in self.getCutoffCell().getBonds():\r\n pos1 = bond.getAtom1().getPosition()\r\n pos2 = bond.getAtom2().getPosition()\r\n jMat = bond.getJMatrix()\r\n newBond = SimpleBond(pos1, pos2, indexOf(matrices,jMat))\r\n simpleCellBonds.append(newBond)\r\n \r\n \r\n simpleBonds = SimpleBondList()\r\n for bond in simpleCellBonds:\r\n pos1 = bond.pos1\r\n pos2 = bond.pos2\r\n jMatInt = bond.jMatrix\r\n for i in range(2):\r\n for j in range(2):\r\n for k in range(2):\r\n for a in range(Na):\r\n for b in range(Nb):\r\n for c in range(Nc):\r\n x1 = pos1[0] + a + (Na * i)\r\n y1 = pos1[1] + b + (Nb * j)\r\n z1 = pos1[2] + c + (Nc * k)\r\n \r\n x2 = pos2[0] + a + (Na * i)\r\n y2 = pos2[1] + b + (Nb * j)\r\n z2 = pos2[2] + c + (Nc * k) \r\n bond = SimpleBond( (x1,y1,z1), (x2,y2,z2), jMatInt )\r\n simpleBonds.addBond(bond) \r\n\r\n #Pick out bonds that link first cutoff cell and another\r\n interCutoffBonds = []\r\n for eachBond in simpleBonds.list:\r\n #Check if one bond is in the first cutoff cell\r\n if eachBond.pos1[0] < Na or eachBond.pos2[0] < Na: #x\r\n if eachBond.pos1[1] < Nb or eachBond.pos2[1] < Nb: #y\r\n if eachBond.pos1[2] < Nc or eachBond.pos2[2] < Nc: #z\r\n #check if the second bond is not in the first cutoff cell\r\n if (not eachBond.pos1[0] < Na) or (not eachBond.pos2[0] < Na): #x\r\n if (not eachBond.pos1[1] < Nb) or (not eachBond.pos2[1] < Nb): #y\r\n if (not eachBond.pos1[2] < Nc) or (not eachBond.pos2[2] < Nc): #z\r\n interCutoffBonds.append(eachBond)\r\n \r\n \r\n \r\n \r\n# finalBondList = 
[]\r\n #Translate all bonds within the cutoff cell\r\n\r\n \r\n file.write(\"#J Matrices\\n#Number J11 J12 J13 J21 J22 J23 J31 J32 J33\\n\")\r\n for i in range(len(matrices)):\r\n jMat = matrices[i]\r\n jStr = str(i) + \" \" + str(jMat[0][0]) + \" \" + str(jMat[0][1]) + \" \" + str(jMat[0][2]) + \" \" + str(jMat[1][0]) + \" \" + str(jMat[1][1]) + \" \" + str(jMat[1][2]) + \" \" + str(jMat[2][0]) + \" \" + str(jMat[2][1]) + \" \" + str(jMat[2][2])\r\n file.write(jStr + \"\\n\")\r\n \r\n \r\n file.write(\"#Bonds\\n#X1 Y1 Z1 X2 Y2 Z2 J\\n\")\r\n for bond in simpleCellBonds: \r\n pos1 = bond.pos1\r\n pos2 = bond.pos2\r\n# jStr += \" \" \r\n# jStr += str(jMat[0][1]) \r\n# jStr += \" \" + str(jMat[0][2]) \r\n# jStr += \" \" + str(jMat[1][0]) + \" \" + str(jMat[1][1]) + \" \" + str(jMat[1][2]) + \" \" + str(jMat[2][0]) + \" \" + str(jMat[2][1]) + \" \" + str(jMat[2][2])\r\n for i in range(size):\r\n for j in range(size):\r\n for k in range(size):\r\n x1 = pos1[0] + (Na * i)\r\n y1 = pos1[1] + (Nb * j)\r\n z1 = pos1[2] + (Nc * k)\r\n \r\n x2 = pos2[0] + (Na * i)\r\n y2 = pos2[1] + (Nb * j)\r\n z2 = pos2[2] + (Nc * k) \r\n# smplBond = SimpleBond( (x1,y1,z1), (x2,y2,z2), jMat )\r\n # finalBondList.append(smplBond)\r\n pos1Str = str(x1) + \" \" + str(y1) + \" \" + str(z1)\r\n pos2Str = str(x2) + \" \" + str(y2) + \" \" + str(z2)\r\n jStr = str(bond.jMatrix)\r\n#There should always be a jMatrix if jMat != None:\r\n file.write(pos1Str + \" \" + pos2Str + \" \" + jStr + \"\\n\")\r\n# else:\r\n# file.write(pos1Str + \" \" + pos2Str + \"\\n\")\r\n \r\n for smplBond in interCutoffBonds:\r\n pos1 = smplBond.pos1\r\n pos2 = smplBond.pos2\r\n jMat = smplBond.jMatrix\r\n jStr = str(jMat)\r\n# jStr = str(jMat[0][0]) + \" \" + str(jMat[0][1]) + \" \" + str(jMat[0][2]) + \" \" + str(jMat[1][0]) + \" \" + str(jMat[1][1]) + \" \" + str(jMat[1][2]) + \" \" + str(jMat[2][0]) + \" \" + str(jMat[2][1]) + \" \" + str(jMat[2][2])\r\n aDisp = abs(pos1[0] - pos2[0])\r\n bDisp = abs(pos1[1] - pos2[1])\r\n cDisp = abs(pos1[2] - pos2[2])\r\n# if pos1[0] > pos2[0]:\r\n# aDisp = pos1[0]\r\n# else:\r\n# aDisp = pos2[0]\r\n# if pos1[1] > pos2[1]:\r\n# bDisp = pos1[1]\r\n# else:\r\n# bDisp = pos2[1]\r\n# if pos1[2] > pos2[2]:\r\n# cDisp = pos1[2]\r\n# else:\r\n# cDisp = pos2[2]\r\n for i in range(size - aDisp):\r\n for j in range(size - bDisp):\r\n for k in range(size - cDisp):\r\n x1 = pos1[0] + (Na * i)\r\n y1 = pos1[1] + (Nb * j)\r\n z1 = pos1[2] + (Nc * k)\r\n \r\n x2 = pos2[0] + (Na * i)\r\n y2 = pos2[1] + (Nb * j)\r\n z2 = pos2[2] + (Nc * k) \r\n# smplBond = SimpleBond( (x1,y1,z1), (x2,y2,z2), jMat )\r\n # finalBondList.append(smplBond)\r\n pos1Str = str(x1) + \" \" + str(y1) + \" \" + str(z1)\r\n pos2Str = str(x2) + \" \" + str(y2) + \" \" + str(z2)\r\n#There should always be a jMatrix if jMat != None:\r\n file.write(pos1Str + \" \" + pos2Str + \" \" + jStr + \"\\n\")\r\n \r\n #Check for reapeats in finalBond List just for testing\r\n# def isRepeat(finalBondList):\r\n# for i in range(0, len(finalBondList)):\r\n# for j in range(i + 1, len(finalBondList)):\r\n# if finalBondList[i].sameBond(finalBondList[j]):\r\n# return True\r\n# return False\r\n# \r\n# if isRepeat(finalBondList):\r\n# print \"There is a repeat!\"\r\n# else:\r\n# print \"NO repeats!\"\r\n \r\n \r\n\r\n \r\n file.close()\r\n seconds = time.clock() - initialTime\r\n minutes = int(seconds)/60\r\n seconds -= (minutes*60)\r\n hours = minutes/60\r\n minutes -= hours*60\r\n print \"Done\\nTime:\", hours, \"hours\", minutes, \"minutes\", seconds, \"seconds\"",
"def temp_generate_position_label_data_matrix_All_label():\n temp_position_label = OrderedDict()\n with open(\"%s/temp_label_final_raw.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading temporary label positions file: %s/temp_label_final_raw.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n next(csv_reader, None)\n for row in csv_reader:\n temp_position_label[row[0]] = row[1:]\n f33=open(\"%s/temp_Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f33.write('\\t' + print_string_header.strip() + '\\n')\n for value in temp_position_label:\n lll = ['reference_unmapped_position', 'LowFQ', 'LowFQ_DP', 'LowFQ_QUAL', 'LowFQ_DP_QUAL', 'LowFQ_QUAL_DP', 'HighFQ_DP', 'HighFQ_QUAL', 'HighFQ_DP_QUAL', 'HighFQ_QUAL_DP', 'HighFQ', 'LowFQ_proximate_SNP', 'LowFQ_DP_proximate_SNP', 'LowFQ_QUAL_proximate_SNP', 'LowFQ_DP_QUAL_proximate_SNP', 'LowFQ_QUAL_DP_proximate_SNP', 'HighFQ_DP_proximate_SNP', 'HighFQ_QUAL_proximate_SNP', 'HighFQ_DP_QUAL_proximate_SNP', 'HighFQ_QUAL_DP_proximate_SNP', 'HighFQ_proximate_SNP', '_proximate_SNP']\n ref_var = ['reference_allele', 'VARIANT']\n if set(ref_var) & set(temp_position_label[value]):\n if set(lll) & set(temp_position_label[value]):\n print_string = \"\"\n for i in temp_position_label[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f33.write(STRR2)\n f33.close()\n csv_file.close()\n\n \"\"\"\n Read temp_Only_filtered_positions_for_closely_matrix file and generate a matrix of positions that are being filtered just because of FQ\n \"\"\"\n temp_position_label_FQ = OrderedDict()\n with open(\"%s/temp_Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading temporary Only_filtered_positions label file: %s/temp_Only_filtered_positions_for_closely_matrix.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n next(csv_reader, None)\n for row in csv_reader:\n temp_position_label_FQ[row[0]] = row[1:]\n f44=open(\"%s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f44.write('\\t' + print_string_header.strip() + '\\n')\n for value in temp_position_label_FQ:\n #lll = ['reference_unmapped_position', 'LowFQ', 'LowFQ_DP', 'LowFQ_QUAL', 'LowFQ_DP_QUAL', 'LowFQ_QUAL_DP', 'HighFQ_DP', 'HighFQ_QUAL', 'HighFQ_DP_QUAL', 'HighFQ_QUAL_DP', 'HighFQ', 'LowFQ_proximate_SNP', 'LowFQ_DP_proximate_SNP', 'LowFQ_QUAL_proximate_SNP', 'LowFQ_DP_QUAL_proximate_SNP', 'LowFQ_QUAL_DP_proximate_SNP', 'HighFQ_DP_proximate_SNP', 'HighFQ_QUAL_proximate_SNP', 'HighFQ_DP_QUAL_proximate_SNP', 'HighFQ_QUAL_DP_proximate_SNP', 'HighFQ_proximate_SNP', '_proximate_SNP']\n lll = ['LowFQ']\n #ref_var = ['reference_allele', 'VARIANT']\n if set(lll) & set(temp_position_label_FQ[value]):\n print_string = \"\"\n for i in temp_position_label_FQ[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f44.write(STRR2)\n f44.close()\n csv_file.close()\n\n ## Perform Sed\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % 
args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_unmapped_position/0/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_allele/1/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/VARIANT/2/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ/3/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % 
args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n \"\"\"\n Read temp_Only_filtered_positions_for_closely_matrix file and generate a matrix of positions that are being filtered just because of Dp\n \"\"\"\n temp_position_label_DP = OrderedDict()\n with open(\"%s/temp_Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading temporary Only_filtered_positions label file: %s/temp_Only_filtered_positions_for_closely_matrix.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n next(csv_reader, None)\n for row in csv_reader:\n temp_position_label_DP[row[0]] = row[1:]\n f44=open(\"%s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f44.write('\\t' + print_string_header.strip() + '\\n')\n for value in temp_position_label_DP:\n #lll = ['reference_unmapped_position', 'LowFQ', 'LowFQ_DP', 'LowFQ_QUAL', 'LowFQ_DP_QUAL', 'LowFQ_QUAL_DP', 'HighFQ_DP', 'HighFQ_QUAL', 'HighFQ_DP_QUAL', 'HighFQ_QUAL_DP', 'HighFQ', 'LowFQ_proximate_SNP', 'LowFQ_DP_proximate_SNP', 'LowFQ_QUAL_proximate_SNP', 'LowFQ_DP_QUAL_proximate_SNP', 'LowFQ_QUAL_DP_proximate_SNP', 'HighFQ_DP_proximate_SNP', 'HighFQ_QUAL_proximate_SNP', 'HighFQ_DP_QUAL_proximate_SNP', 'HighFQ_QUAL_DP_proximate_SNP', 'HighFQ_proximate_SNP', '_proximate_SNP']\n lll = ['HighFQ_DP']\n #ref_var = ['reference_allele', 'VARIANT']\n if set(lll) & set(temp_position_label_FQ[value]):\n print_string = \"\"\n for i in temp_position_label_FQ[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f44.write(STRR2)\n f44.close()\n csv_file.close()\n\n\n #Perform Sed\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_unmapped_position/0/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_allele/1/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/VARIANT/2/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP/4/g' 
%s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP/3/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n\n\n \"\"\"\n Read each Sample columns and calculate the percentage of each label to generate barplot statistics.\n This will give a visual explanation of how many positions in each samples were filtered out because of different reason\n \"\"\"\n\n c_reader = csv.reader(open('%s/temp_Only_filtered_positions_for_closely_matrix.txt' % args.filter2_only_snp_vcf_dir, 'r'), delimiter='\\t')\n columns = list(zip(*c_reader))\n counts = 1\n end = len(vcf_filenames) + 1\n f_bar_count = open(\"%s/bargraph_counts.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n f_bar_perc = open(\"%s/bargraph_percentage.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n f_bar_count.write(\"Sample\\tunmapped_positions\\treference_allele\\ttrue_variant\\tOnly_low_FQ\\tOnly_DP\\tOnly_low_MQ\\tother\\n\")\n f_bar_perc.write(\"Sample\\tunmapped_positions_perc\\ttrue_variant_perc\\tOnly_low_FQ_perc\\tOnly_DP_perc\\tOnly_low_MQ_perc\\tother_perc\\n\")\n for i in xrange(1, end, 1):\n \"\"\" Bar Count Statistics: Variant Position Count Statistics \"\"\"\n true_variant = columns[i].count('VARIANT')\n unmapped_positions = columns[i].count('reference_unmapped_position')\n reference_allele = columns[i].count('reference_allele')\n Only_low_FQ = 
columns[i].count('LowFQ')\n Only_DP = columns[i].count('HighFQ_DP')\n Only_low_MQ = columns[i].count('HighFQ')\n low_FQ_other_parameters = columns[i].count('LowFQ_QUAL_DP_proximate_SNP') + columns[i].count('LowFQ_DP_QUAL_proximate_SNP') + columns[i].count('LowFQ_QUAL_proximate_SNP') + columns[i].count('LowFQ_DP_proximate_SNP') + columns[i].count('LowFQ_proximate_SNP') + columns[i].count('LowFQ_QUAL_DP') + columns[i].count('LowFQ_DP_QUAL') + columns[i].count('LowFQ_QUAL') + columns[i].count('LowFQ_DP')\n high_FQ_other_parameters = columns[i].count('HighFQ_QUAL_DP_proximate_SNP') + columns[i].count('HighFQ_DP_QUAL_proximate_SNP') + columns[i].count('HighFQ_QUAL_proximate_SNP') + columns[i].count('HighFQ_DP_proximate_SNP') + columns[i].count('HighFQ_proximate_SNP') + columns[i].count('HighFQ_QUAL_DP') + columns[i].count('HighFQ_DP_QUAL') + columns[i].count('HighFQ_QUAL')\n other = low_FQ_other_parameters + high_FQ_other_parameters\n total = true_variant + unmapped_positions + reference_allele + Only_low_FQ + Only_DP + low_FQ_other_parameters + high_FQ_other_parameters + Only_low_MQ\n filename_count = i - 1\n bar_string = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (os.path.basename(vcf_filenames[filename_count].replace('_filter2_final.vcf_no_proximate_snp.vcf', '')), unmapped_positions, reference_allele, true_variant, Only_low_FQ, Only_DP, Only_low_MQ, other)\n f_bar_count.write(bar_string)\n\n \"\"\" Bar Count Percentage Statistics: Variant Position Percentage Statistics \"\"\"\n try:\n true_variant_perc = float((columns[i].count('VARIANT') * 100) / total)\n except ZeroDivisionError:\n true_variant_perc = 0\n try:\n unmapped_positions_perc = float((columns[i].count('reference_unmapped_position') * 100) / total)\n except ZeroDivisionError:\n unmapped_positions_perc = 0\n try:\n reference_allele_perc = float((columns[i].count('reference_allele') * 100) / total)\n except ZeroDivisionError:\n reference_allele_perc = 0\n try:\n Only_low_FQ_perc = float((columns[i].count('LowFQ') * 100) / total)\n except ZeroDivisionError:\n Only_low_FQ_perc = 0\n try:\n Only_DP_perc = float((columns[i].count('HighFQ_DP') * 100) / total)\n except ZeroDivisionError:\n Only_DP_perc = 0\n try:\n Only_low_MQ_perc = float((columns[i].count('HighFQ') * 100) / total)\n except ZeroDivisionError:\n Only_low_MQ_perc = 0\n try:\n low_FQ_other_parameters_perc = float(((columns[i].count('LowFQ_QUAL_DP_proximate_SNP') + columns[i].count('LowFQ_DP_QUAL_proximate_SNP') + columns[i].count('LowFQ_QUAL_proximate_SNP') + columns[i].count('LowFQ_DP_proximate_SNP') + columns[i].count('LowFQ_proximate_SNP') + columns[i].count('LowFQ_QUAL_DP') + columns[i].count('LowFQ_DP_QUAL') + columns[i].count('LowFQ_QUAL') + columns[i].count('LowFQ_DP')) * 100) / total)\n except ZeroDivisionError:\n low_FQ_other_parameters_perc = 0\n try:\n high_FQ_other_parameters_perc = float(((columns[i].count('HighFQ_QUAL_DP_proximate_SNP') + columns[i].count('HighFQ_DP_QUAL_proximate_SNP') + columns[i].count('HighFQ_QUAL_proximate_SNP') + columns[i].count('HighFQ_DP_proximate_SNP') + columns[i].count('HighFQ_proximate_SNP') + columns[i].count('HighFQ_QUAL_DP') + columns[i].count('HighFQ_DP_QUAL') + columns[i].count('HighFQ_QUAL')) * 100) / total)\n except ZeroDivisionError:\n high_FQ_other_parameters_perc = 0\n\n other_perc = float(low_FQ_other_parameters_perc + high_FQ_other_parameters_perc)\n bar_perc_string = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (os.path.basename(vcf_filenames[filename_count].replace('_filter2_final.vcf_no_proximate_snp.vcf', '')), 
unmapped_positions_perc, true_variant_perc, Only_low_FQ_perc, Only_DP_perc, Only_low_MQ_perc, other_perc)\n f_bar_perc.write(bar_perc_string)",
"def __generate_symboltable(self, code):\n\n code_without_lables = []\n address = 0\n for line in code:\n label_code = line.split(':')\n label = label_code[0]\n if len(label) != len(line):\n self.__symboltable[label] = address\n address += REG_SIZE\n instruction = label_code.pop().strip()\n code_without_lables = code_without_lables + [instruction]\n else:\n instruction = label_code.pop().strip()\n code_without_lables = code_without_lables + [instruction]\n\n tokens = instruction.split(' ')\n asm_directive = tokens[0]\n if tokens[0] in AssemblerDirectives.to_string():\n if asm_directive == AssemblerDirectives.ORG.name:\n address = int(tokens[1])\n else:\n address += REG_SIZE\n\n return code_without_lables",
"def build_toc(self) -> None:\n logger.info(__('writing toc.ncx file...'))\n\n if self.config.epub_tocscope == 'default':\n doctree = self.env.get_and_resolve_doctree(self.config.root_doc,\n self, prune_toctrees=False,\n includehidden=False)\n refnodes = self.get_refnodes(doctree, [])\n self.toc_add_files(refnodes)\n else:\n # 'includehidden'\n refnodes = self.refnodes\n self.check_refnodes(refnodes)\n navpoints = self.build_navpoints(refnodes)\n level = max(item['level'] for item in self.refnodes)\n level = min(level, self.config.epub_tocdepth)\n copy_asset_file(path.join(self.template_dir, 'toc.ncx_t'), self.outdir,\n self.toc_metadata(level, navpoints))",
"def _buildtable(self):\n\n tabrows = []\n\n for i, (expid, exfiles) in enumerate(self._exposure_files.items()):\n specflux_b, specflux_r, specflux_z = [], [], []\n tab = None\n\n if len(exfiles) == 0:\n continue\n\n print(expid)\n for exfile in exfiles:\n print(exfile)\n hdu = fits.open(exfile)\n\n # The following tables are present in the redux sframes and the\n # nightwatch qcframes.\n wave = hdu['WAVELENGTH'].data\n\n # However, in the nightwatch files the wavelength data are a\n # table of size nfiber x nwavelength.\n if self._filetype == 'nightwatch':\n if wave.ndim > 1:\n wave = wave[0]\n\n fluxhead = hdu['FLUX'].header\n fluxdata = hdu['FLUX'].data\n ivardata = hdu['IVAR'].data\n fibermap = hdu['FIBERMAP'].data\n exptime = fluxhead['EXPTIME']\n if not np.all(self._unditherfa['FIBER'] ==\n np.arange(len(self._unditherfa))):\n raise ValueError('weird fiberassign file format!')\n fibermap = self._unditherfa[fibermap['FIBER']]\n\n target_id = fibermap['TARGETID']\n target_ra = fibermap['TARGET_RA']\n target_dec = fibermap['TARGET_DEC']\n fiber = fibermap['FIBER']\n objtype = fibermap['OBJTYPE']\n flux_g = fibermap['FLUX_G']\n flux_r = fibermap['FLUX_R']\n flux_z = fibermap['FLUX_Z']\n x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]\n\n camera = fluxhead['CAMERA'][0].upper()\n\n if getattr(self, '_deltara', None) is not None:\n dra = self._deltara[i]*np.ones(len(fiber))\n ddec = self._deltadec[i]*np.ones(len(fiber))\n elif self._dithertype == 'telescope':\n dithra = self._ditherfa['target_ra']\n dithdec = self._ditherfa['target_dec']\n udithra = self._unditherfa['target_ra']\n udithdec = self._unditherfa['target_dec']\n ontarget = ((self._ditherfa['targetid'] ==\n self._unditherfa['targetid']) &\n (self._ditherfa['objtype'] == 'TGT'))\n dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60\n dfiberdec = (dithdec-udithdec)*60*60\n if not np.all(self._ditherfa['FIBER'] ==\n np.arange(len(self._ditherfa))):\n raise ValueError('unexpected shape of dither file')\n dfiberra[~ontarget] = np.nan\n dfiberdec[~ontarget] = np.nan\n dfiberra = dfiberra[fiber]\n dfiberdec = dfiberdec[fiber]\n wcs = self.lookup_wcs(fluxhead['MJD-OBS'])\n centralwcs = self._central_wcs\n if (~np.isfinite(centralwcs['cenra'][1]) or\n ~np.isfinite(centralwcs['cendec'][1])):\n raise ValueError('central pointing ra/dec is NaN!')\n dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])\n dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))\n dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]\n dra = dfiberra + dtelra*60*60\n ddec = dfiberdec + dteldec*60*60\n if np.all(~np.isfinite(dra)):\n print('warning: no good telescope offset for %s' %\n exfile)\n else:\n raise ValueError('not implemented')\n \n for j, fiber_id in enumerate(fiber):\n flux = fluxdata[j]\n ivar = ivardata[j]\n if not np.any(ivar > 0):\n specflux = 0\n specflux_ivar = 0\n else:\n meanivar = np.mean(ivar[ivar > 0])\n mask = ivar > meanivar / 100\n specflux = np.trapz(flux*mask, wave)\n specflux_ivar = 1./np.sum(ivar[mask]**-1)\n # Schlegel: sum over correct wavelengths, all three\n # filters, plus 11 pixel median filter to reject\n # cosmics.\n # will require being better about reading in\n # the spectrographs together.\n tabrows.append((expid, exptime,\n target_id[j], target_ra[j], target_dec[j],\n fiber[j], objtype[j],\n flux_g[j], flux_r[j], flux_z[j],\n specflux, specflux_ivar, camera,\n dra[j], ddec[j],\n x[j], y[j]))\n\n tab = Table(rows=tabrows,\n names=('EXPID', 'EXPTIME',\n 'TARGETID', 'TARGET_RA', 'TARGET_DEC',\n 
'FIBER', 'OBJTYPE',\n 'FLUX_G', 'FLUX_R', 'FLUX_Z',\n 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',\n 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',\n 'XFOCAL', 'YFOCAL'),\n meta={'EXTNAME' : 'DITHER',\n 'TILEID' : '{}'.format(self._tileid)})\n\n return tab",
"def make_table_file(lines, labels, dir_path, filename):\r\n lines.sort()\r\n lines.insert(0, '\\t'.join(labels))\r\n\r\n output = open(os.path.join(dir_path, filename), 'w')\r\n output.write('\\n'.join(lines))\r\n output.close()",
"def write_cycle_table(cycle_info, c_table_file):\r\n \r\n txt = \"typedef struct {\\n\"\r\n txt = txt + \" int min;\\n\"\r\n txt = txt + \" int max;\\n\"\r\n txt = txt + \"} cycle_count_t;\\n\"\r\n txt = txt + \"\\n\\n\"\r\n txt = txt + \"cycle_count_t cycle_count[256] = {\\n \"\r\n for i in range(len(cycle_info)):\r\n item = cycle_info[i]\r\n if item[2] == 0:\r\n txt = txt + \"{ 0, 0}, \"\r\n else:\r\n txt = txt + \"{%2u,%2u}, \" % (item[0], item[1])\r\n if (i % 8) == 7:\r\n txt = txt + \"\\n \"\r\n txt = txt + \"};\\n\\n\"\r\n \r\n \r\n fout = open(c_table_file, \"w\")\r\n fout.write(txt)\r\n fout.close()",
"def _dta_obj_to_file(self, address):\n global get_missing\n \n type_dict = {\n 65530: ['b',1],\n 65529: ['h',2],\n 65528: ['l',4], \n 65527: ['f',4],\n 65526: ['d',8]\n }\n first_missing = {\n 65530: 101,\n 65529: 32741,\n 65528: 2147483620,\n 65527: float.fromhex('0x1.0p+127'),\n 65526: float.fromhex('0x1.0p+1023')\n }\n typlist = self._typlist\n byteorder = self._byteorder\n nvar = self._nvar\n \n def write_value_label_table(labname, table):\n # Stata limits are a bit confusing.\n # Total length of text (incl. null terminators) must be <= 32000 ?\n # Total number of vals must be <= 65536 ?\n # But the limit on text length forces no. of vals <= 16000 since\n # each label must occupy at least two bytes \n # (including null terminator).\n labname = labname[:32]\n \n val = sorted(table.keys())\n # each value may be up to 81 chars including null\n txt = [table[v][:80] for v in val] \n \n nval = len(val)\n if nval > 65536: # max number of values allowed\n val = val[:65536]\n txt = txt[:65536]\n nval = 65536\n \n off = [0]\n for i in range(nval - 1):\n # in next line, \"+ 1\" to leave room for \\0\n offset = off[i] + len(txt[i]) + 1\n if offset > 32000: # if too much text\n off = off[:i] # cut off at before the ith one\n val = val[:i]\n txt = txt[:i]\n nval = i\n break\n off.append(offset)\n txt_len = off[-1] + len(txt[-1]) + 1\n \n table_len = 4 + 4 + 4*nval + 4*nval + txt_len\n \n dta.write(bytearray('<lbl>', 'iso-8859-1'))\n dta.write(pack(byteorder + \"l\", table_len))\n dta.write(bytearray(labname, 'iso-8859-1') + \n b'\\0'*(33-len(labname)))\n dta.write(b'\\x00\\x00\\x00')\n \n dta.write(pack(byteorder + \"l\", nval))\n dta.write(pack(byteorder + \"l\", txt_len))\n for o in off: dta.write(pack(byteorder + \"l\", o))\n for v in val: dta.write(pack(byteorder + \"l\", v))\n for t in txt: dta.write(bytearray(t, 'iso-8859-1') + b'\\0')\n dta.write(bytearray('</lbl>', 'iso-8859-1'))\n \n with open(address, 'wb') as dta:\n dta.write(bytearray('<stata_dta>', 'iso-8859-1'))\n \n # header\n dta.write(bytearray('<header>', 'iso-8859-1'))\n dta.write(bytearray('<release>', 'iso-8859-1'))\n dta.write(bytearray('117', 'iso-8859-1'))\n dta.write(bytearray('</release>', 'iso-8859-1'))\n dta.write(bytearray('<byteorder>', 'iso-8859-1'))\n dta.write(\n bytearray('MSF' if byteorder == '>' else 'LSF', 'iso-8859-1'))\n dta.write(bytearray('</byteorder>', 'iso-8859-1'))\n dta.write(bytearray('<K>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'H', self._nvar))\n dta.write(bytearray('</K>', 'iso-8859-1'))\n dta.write(bytearray('<N>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'I', self._nobs))\n dta.write(bytearray('</N>', 'iso-8859-1'))\n dta.write(bytearray('<label>', 'iso-8859-1'))\n label = self._data_label\n label_length = len(label)\n dta.write(pack(byteorder + 'B', label_length))\n dta.write(bytearray(label, 'iso-8859-1'))\n dta.write(bytearray('</label>', 'iso-8859-1'))\n dta.write(bytearray('<timestamp>', 'iso-8859-1'))\n stamp = self._time_stamp\n m = re.match(\n '^([ 0-3][0-9]) ' + \n '(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ' + \n '[0-9]{4} ([ 0-2][0-9]):([0-9]{2})$', \n stamp)\n if (m and \n 1 <= int(m.group(1)) <= 31 and \n 0 <= int(m.group(3)) <= 24 and\n 0 <= int(m.group(4)) < 60):\n dta.write(pack(byteorder + 'B', 17))\n # next line includes optional binary zero\n dta.write(bytearray(stamp, 'iso-8859-1'))\n else: # there's something wrong with the time stamp, just skip it\n dta.write(pack(byteorder + 'B', 0))\n dta.write(bytearray('</timestamp>', 'iso-8859-1'))\n 
dta.write(bytearray('</header>', 'iso-8859-1'))\n \n # map\n offset_map = [0, dta.tell()]\n dta.write(bytearray(\"<map>\", 'iso-8859-1'))\n for i in range(14):\n dta.write(pack(byteorder + 'Q', 0))\n dta.write(bytearray(\"</map>\", \"iso-8859-1\"))\n \n # variable types\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<variable_types>\", 'iso-8859-1'))\n dta.write(pack(byteorder + 'H'*nvar, *typlist))\n dta.write(bytearray(\"</variable_types>\", 'iso-8859-1'))\n \n # variable names\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<varnames>\", 'iso-8859-1'))\n for name in self._varlist:\n name = name[:32]\n dta.write(bytearray(name, 'iso-8859-1') + b'\\0'*(33-len(name)))\n dta.write(bytearray(\"</varnames>\", 'iso-8859-1'))\n \n # sort order\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<sortlist>\", 'iso-8859-1'))\n srtlist = self._srtlist + [None]\n srtlist = [srt + 1 if srt is not None else 0 for srt in srtlist]\n dta.write(pack(byteorder + 'H'*(nvar + 1), *srtlist))\n dta.write(bytearray(\"</sortlist>\", 'iso-8859-1'))\n \n # formats\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<formats>\", 'iso-8859-1'))\n for fmt in self._fmtlist:\n fmt = fmt[:48]\n dta.write(bytearray(fmt, 'iso-8859-1') + b'\\0'*(49-len(fmt)))\n dta.write(bytearray(\"</formats>\", 'iso-8859-1'))\n \n # value-label names\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<value_label_names>\", 'iso-8859-1'))\n for lab in self._lbllist:\n lab = lab[:32]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(33-len(lab)))\n dta.write(bytearray(\"</value_label_names>\", 'iso-8859-1'))\n \n # variable labels\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<variable_labels>\", 'iso-8859-1'))\n for lab in self._vlblist:\n lab = lab[:80]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(81-len(lab)))\n dta.write(bytearray(\"</variable_labels>\", 'iso-8859-1'))\n \n # characteristics\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<characteristics>\", 'iso-8859-1'))\n chrdict = self._chrdict\n for varname in chrdict:\n varname = varname[:32]\n var_dict = chrdict[varname]\n for charname in var_dict:\n charname = charname[:32]\n char = var_dict[charname][:67784] # or 8681 for Small Stata\n full_length = 66 + len(char) + 1 # +1 for null termination\n \n dta.write(bytearray('<ch>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'I', full_length))\n dta.write(bytearray(varname, 'iso-8859-1') + \n b'\\0'*(33-len(varname)))\n dta.write(bytearray(charname, 'iso-8859-1') + \n b'\\0'*(33-len(charname)))\n dta.write(bytearray(char, 'iso-8859-1') + b'\\0')\n dta.write(bytearray('</ch>', 'iso-8859-1'))\n dta.write(bytearray(\"</characteristics>\", 'iso-8859-1'))\n \n # data\n offset_map.append(dta.tell())\n strls = {}\n dta.write(bytearray(\"<data>\", 'iso-8859-1'))\n varvals = self._varvals\n nvar, nobs = self._nvar, self._nobs\n missing_save_val = self._missing_save_val\n for i in range(nobs):\n row = varvals[i]\n for j in range(nvar):\n value, st_type = row[j], typlist[j]\n if st_type <= 2045:\n value = value[:st_type]\n dta.write(bytearray(value, 'iso-8859-1') + \n b'\\0'*(st_type - len(value)))\n elif st_type == 32768:\n if value == \"\":\n o,v = 0,0\n elif value in strls:\n o,v = strls[value]\n else:\n strls[value] = o,v = (i+1,j+1)\n dta.write(pack(byteorder + 'II', v, o))\n else:\n fmt = 'bhlfd'[65530 - st_type]\n if value is None:\n value = first_missing[st_type]\n elif isinstance(value, MissingValue):\n value = missing_save_val(value, st_type)\n elif (value > 
8.988465674311579e+307 or \n value < -1.7976931348623157e+308):\n # is this the right way to handle this ?\n value = missing_save_val(\n get_missing(value), st_type)\n dta.write(pack(byteorder + fmt, value))\n dta.write(bytearray(\"</data>\", 'iso-8859-1'))\n \n # strls\n offset_map.append(dta.tell())\n strls = [(val, key) for key,val in strls.items()]\n strls.sort()\n dta.write(bytearray(\"<strls>\", 'iso-8859-1'))\n for (o,v), value in strls:\n dta.write(bytearray('GSO', 'iso-8859-1'))\n dta.write(pack(byteorder + 'II', v, o))\n if isinstance(value, str):\n try:\n # expect error in next line if anywhere\n value = bytes(value, 'iso-8859-1') + b'\\x00'\n dta.write(pack('B', 130))\n except UnicodeEncodeError:\n value = bytes(value, 'utf-8')\n dta.write(pack('B', 129))\n elif (not isinstance(value, bytes) and \n not isinstance(value, bytearray)):\n msg = \"only bytes or str object allowed in Stata strl\"\n raise TypeError(msg)\n else:\n dta.write(pack('B', 129))\n val_len = len(value)\n dta.write(pack(byteorder + 'I', val_len))\n num_vals = unpack(str(val_len) + 'b', value)\n dta.write(value)\n dta.write(bytearray(\"</strls>\", 'iso-8859-1'))\n \n # value labels\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<value_labels>\", 'iso-8859-1'))\n for name, table in self._vallabs.items():\n write_value_label_table(name, table)\n dta.write(bytearray(\"</value_labels>\", 'iso-8859-1'))\n \n # end file\n offset_map.append(dta.tell())\n dta.write(bytearray(\"</stata_dta>\", 'iso-8859-1'))\n \n offset_map.append(dta.tell())\n \n # write map\n dta.seek(offset_map[1] + 5)\n for offset in offset_map:\n dta.write(pack(byteorder + 'Q', offset))",
"def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table",
"def generate_data(input_file):\n \n mol_mass_list = []\n inchi_list = []\n SMILES_list = []\n identifier_list = []\n inchi_key1_list = [] \n inchi_key2_list = [] \n mol_formula_list = []\n NA_list = []\n \n pre_SMILES_list = []\n identifier_list = []\n all_lines = input_file.split('\\n')\n if all_lines[-1] == '':\n all_lines = all_lines[:-1]\n for line in all_lines:\n line = line.split('\\t')\n\n #Convert to mol and remove invalid structures \n smile_string = ''\n id_string = ''\n m = line[0]\n id_name = line[1]\n mol = Chem.MolFromSmiles(m)\n if mol != None:\n smile_string += m\n id_string += id_name\n pre_SMILES_list += [smile_string]\n \n #Source identifiers\n identifier_list += [id_string]\n \n pre_inchi_list = []\n for smile in pre_SMILES_list:\n #Generate mol\n m = Chem.MolFromSmiles(smile)\n #SMILES, canonical\n sm = Chem.MolToSmiles(m)\n SMILES_list += [sm]\n #Monoisotopic mass\n mol_weigth = Descriptors.ExactMolWt(m)\n mol_mass_list += [mol_weigth]\n #Mol Forumula\n mol_formula = rdMolDescriptors.CalcMolFormula(m)\n mol_formula_list += [mol_formula]\n # InChI \n inchi = rdinchi.MolToInchi(m)\n pre_inchi_list += [inchi[0]] \n \n \n # InChIKey1 and InChIKey2\n for inchi in pre_inchi_list:\n if not str(inchi).startswith('InCh'):\n inchi = 'NA'\n inchi_list += [inchi]\n \n pre_inchi_key_list =[]\n for inchi2 in inchi_list: \n if inchi2 == 'NA':\n inchi_key = \"NA-NA\"\n pre_inchi_key_list += [inchi_key]\n if inchi2 != 'NA':\n inchi_key = rdinchi.InchiToInchiKey(inchi2)\n pre_inchi_key_list += [inchi_key]\n \n for inchi_key in pre_inchi_key_list:\n inchi_key = inchi_key.split('-')\n inchi_key2 = inchi_key[1]\n inchi_key2_list += [inchi_key2]\n inchi_key1 = inchi_key[0]\n inchi_key1_list += [inchi_key1]\n\n # NA list \n nr_of_structures = len(SMILES_list)\n NA_list += ['NA'] * nr_of_structures\n\n overall_list = [mol_mass_list]+[inchi_list]+[SMILES_list]+\\\n [identifier_list]+[inchi_key2_list]+[inchi_key1_list]+[mol_formula_list]+\\\n [NA_list]+[NA_list]+[NA_list]+[NA_list]\n \n return overall_list",
"def write_text_catalog(self, filename):\n if self.component_type != \"point\":\n raise ValueError(\"component_type must be 'point' to use this method.\")\n\n if not self.stokes.unit.is_equivalent(\"Jy\"):\n raise ValueError(\n \"Stokes units must be equivalent to Jy to use this method.\"\n )\n\n if self.spectral_type == \"subband\":\n warnings.warn(\n \"Text files do not support subband types, this will be written as a \"\n \"'full' spectral type (losing the frequency edge array information).\"\n )\n\n self.check()\n\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n comp_field = []\n for comp_name in comp_names:\n # This will add e.g. ra_J2000 and dec_J2000 for FK5\n comp_field.append(comp_name + \"_\" + frame_desc_str)\n\n header = f\"source_id\\t{comp_field[0]} [deg]\\t{comp_field[1]} [deg]\"\n format_str = \"{}\\t{:0.8f}\\t{:0.8f}\"\n if self.reference_frequency is not None:\n header += \"\\tFlux [Jy]\"\n if self.stokes_error is not None:\n header += \"\\tFlux_error [Jy]\"\n format_str += \"\\t{:0.8f}\"\n header += \"\\tFrequency [Hz]\"\n format_str += \"\\t{:0.8f}\"\n format_str += \"\\t{:0.8f}\"\n if self.spectral_index is not None:\n header += \"\\tSpectral_Index\"\n format_str += \"\\t{:0.8f}\"\n elif self.freq_array is not None:\n for freq in self.freq_array:\n freq_hz_val = freq.to(units.Hz).value\n if freq_hz_val > 1e9:\n freq_str = \"{:g}_GHz\".format(freq_hz_val * 1e-9)\n elif freq_hz_val > 1e6:\n freq_str = \"{:g}_MHz\".format(freq_hz_val * 1e-6)\n elif freq_hz_val > 1e3:\n freq_str = \"{:g}_kHz\".format(freq_hz_val * 1e-3)\n else:\n freq_str = \"{:g}_Hz\".format(freq_hz_val)\n\n format_str += \"\\t{:0.8f}\"\n header += f\"\\tFlux_{freq_str} [Jy]\"\n if self.stokes_error is not None:\n header += f\"\\tFlux_error_{freq_str} [Jy]\"\n format_str += \"\\t{:0.8f}\"\n else:\n # flat spectral response, no freq info\n header += \"\\tFlux [Jy]\"\n format_str += \"\\t{:0.8f}\"\n if self.stokes_error is not None:\n header += \"\\tFlux_error [Jy]\"\n format_str += \"\\t{:0.8f}\"\n\n header += \"\\n\"\n format_str += \"\\n\"\n\n with open(filename, \"w+\") as fo:\n fo.write(header)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n arr = self._text_write_preprocess()\n fieldnames = arr.dtype.names\n comp_names = self._get_lon_lat_component_names()\n lon_name = None\n lat_name = None\n lon_name = fieldnames[\n np.nonzero(np.char.find(fieldnames, comp_names[0]) > -1)[0][0]\n ]\n lat_name = fieldnames[\n np.nonzero(np.char.find(fieldnames, comp_names[1]) > -1)[0][0]\n ]\n for src in arr:\n fieldvals = src\n entry = dict(zip(fieldnames, fieldvals))\n srcid = entry[\"source_id\"]\n lon = entry[lon_name]\n lat = entry[lat_name]\n flux_i = entry[\"I\"]\n if self.stokes_error is not None:\n flux_i_err = entry[\"I_error\"]\n fluxes_write = []\n for ind in range(self.Nfreqs):\n fluxes_write.extend([flux_i[ind], flux_i_err[ind]])\n else:\n fluxes_write = flux_i\n\n if self.reference_frequency is not None:\n rfreq = entry[\"reference_frequency\"]\n if self.spectral_index is not None:\n spec_index = entry[\"spectral_index\"]\n fo.write(\n format_str.format(\n srcid, lon, lat, *fluxes_write, rfreq, spec_index\n )\n )\n else:\n fo.write(\n format_str.format(srcid, lon, lat, *fluxes_write, rfreq)\n )\n else:\n fo.write(format_str.format(srcid, lon, lat, *fluxes_write))",
"def openCif(self, filename):\r\n cf = CifFile.ReadCif(filename)\r\n \r\n #Assuming all data is in one outer block like NIST examples:\r\n data = cf[cf.keys()[0]]\r\n \r\n #Create a Crystollographic Unit Cell\r\n a = data['_cell_length_a']\r\n b = data['_cell_length_b']\r\n c = data['_cell_length_c']\r\n \r\n alpha = data['_cell_angle_alpha']\r\n gamma = data['_cell_angle_gamma']\r\n beta = data['_cell_angle_beta']\r\n \r\n spaceGroupInt = int(data['_symmetry_Int_Tables_number'])\r\n spaceGroup = SpaceGroups.GetSpaceGroup(spaceGroupInt)\r\n \r\n unitcell = Cell(spaceGroup, 0,0,0, a, b, c, alpha, gamma, beta)\r\n \r\n atomLabels = data['_atom_site_label']\r\n atomSymbol = data['_atom_site_type_symbol']\r\n xPositions = data['_atom_site_fract_x']\r\n yPositions = data['_atom_site_fract_y']\r\n zPositions = data['_atom_site_fract_z']\r\n \r\n atoms = [] #for the cell window\r\n for i in range(len(atomLabels)):\r\n #unitcell.generateAtoms((float(xPositions[i]), float(yPositions[i]), float(zPositions[i])), atomLabels[i])\n\r\n aData = [atomLabels[i], 0, float(xPositions[i]), float(yPositions[i]), float(zPositions[i])]\r\n #--Added to atomData: single ion anisotropy, spin magnitude, valence\r\n aData.append(0.0)#Dx\r\n aData.append(0.0)#Dy\r\n aData.append(0.0)#Dz\r\n aData.append(1)#Spin Magnitude\r\n aData.append('')#valence\r\n #-------------------------------------------------------------------\r\n atoms.append(aData)\r\n \r\n self.atomTable.SetValue(i, 0, atomLabels[i])\r\n self.atomTable.SetValue(i, 2, xPositions[i])\r\n self.atomTable.SetValue(i, 3, yPositions[i])\r\n self.atomTable.SetValue(i, 4, zPositions[i])\r\n \r\n #Create a Magnetic Cell\r\n self.MagCell = MagneticCell(unitcell, 1,1,1, spaceGroup)\r\n\r\n\r\n Na = 1 #Cif files only contain 1 unit cell\r\n Nb = 1\r\n Nc = 1\r\n \r\n #self.cellChange(spaceGroupInt, a, b, c, alpha, beta, gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc, atomData = atoms)\n self.updateCell(spaceGroupInt, a, b, c, alpha, beta, gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc, atomData = atoms)\n self.refreshGUI()\n \n \r\n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\r\n n = self.atomTable.GetNumberRows()\r\n for i in range(n):\r\n print self.atomTable.GetValue(i, 0)\r\n send(signal = \"File Load\", sender = \"Session\", spaceGroup = spaceGroupInt, a = a, b = b, c = c, alpha = alpha, beta = beta, gamma = gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc)",
"def htk2dag(self, file_path):\n field_re = re.compile(r'(\\S+)=(?:\"((?:[^\\\\\"]+|\\\\.)*)\"|(\\S+))')\n open_fn = gzip.open if file_path.endswith('.gz') else open\n with open_fn(file_path, 'rt', encoding='utf-8') as fh:\n self.header = {}\n self.nframes = 0\n state = 'header'\n # Read everything\n for spam in fh:\n if spam.startswith('#'):\n continue\n fields = dict(map(lambda t: (t[0], t[1] or t[2]),\n field_re.findall(spam.rstrip())))\n # Number of nodes and arcs\n if 'N' in fields:\n num_nodes = int(fields['N'])\n self.nodes = [None] * num_nodes\n num_arcs = int(fields['L'])\n self.arcs = [None] * num_arcs\n state = 'items'\n if state == 'header':\n self.header.update(fields)\n else:\n # This is a node\n if 'I' in fields:\n idx = int(fields['I'])\n frame = int(float(fields['t']) * FRATE)\n var = int(fields['v']) if 'v' in fields else None\n node = self.Node(\n fields['W'].replace('\\\\', ''), frame, var)\n self.nodes[idx] = node\n if frame > self.nframes:\n self.nframes = frame\n # This is an arc\n elif 'J' in fields:\n idx = int(fields['J'])\n start_node = self.nodes[int(fields['S'])]\n end_node = self.nodes[int(fields['E'])]\n ascr = float(fields.get('a', 0))\n lscr = float(fields.get('l', 0))\n nscr = fields.get('n', [])\n if isinstance(nscr, str):\n nscr = [float(n) for n in nscr.split(',')]\n iscr = fields.get('i', [])\n if isinstance(iscr, str):\n iscr = [float(i) for i in iscr.split(',')]\n arc = self.Arc(\n start_node, end_node, ascr, lscr, nscr, iscr)\n self.arcs[idx] = arc\n # Link up existing nodes\n start_node.exits.append(arc)\n end_node.entries.append(arc)\n\n self.sort_nodes()",
"def main(argv):\r\n\r\n # We should parse the command line arguments, etc. \r\n # For this quick-and-dirty script we'll hardcode all the parameters...\r\n \r\n # ...the target svg file names...\r\n svg_base_filename = \"./table\"\r\n # ...the target text file name where a C-format table with the cycle counts\r\n # will be written to...\r\n c_table_file = \"./cycle_table.c\"\r\n # ...and the source CSV cycle count log file. Note this path is the default\r\n # working path for the Modelsim simulations, change if necessary.\r\n cycle_log_filename = \"../../sim/cycle_count_log.csv\"\r\n \r\n # Read cycle count data...\r\n cycle_info = read_cycle_info(cycle_log_filename)\r\n # ...and read opcode table data (instruction mnemonics and byte counts).\r\n opcode_info = read_opcode_info(\"opcode_info.txt\")\r\n \r\n # First of all, write the C-format cycle table, to be copied and pasted\r\n # into the B51 simulator.\r\n write_cycle_table(cycle_info, c_table_file)\r\n \r\n # We can render the opcode table 'whole', resulting in a wide table, or\r\n # we can render the left and right halves separately, which gives a format\r\n # better suted for a printed page. \r\n \r\n # So, for all three possible rendering formats...\r\n parts = (\"left\", \"right\", \"full\")\r\n # ...render the opcode table.\r\n for part in parts:\r\n \r\n # Build the SVG text for the table...\r\n svg = build_svg_table(opcode_info, cycle_info, part)\r\n # ...and write it to the target file.\r\n fout = None\r\n try:\r\n full_filename = svg_base_filename + \"_\" + part + \".svg\"\r\n fout = open(full_filename, \"w\")\r\n fout.write(svg)\r\n fout.close()\r\n print \"SVG opcode table written to %s\" % full_filename\r\n except:\r\n print \"Trouble opening %s for output\" % full_filename\r\n finally:\r\n if fout: fout.close()",
"def decode(n_pir,template,localtime,draw,bin_display):\n template_filename=template+\"%02d\"\n\n for n in range(n_pir):\n decode_in_file=template_filename%(n+1)\n decode_out_file=decode_in_file+\"_parsed.txt\"\n click.echo(\"Working on file: %s\"%decode_out_file)\n buff_size=8\n try:\n with open(decode_in_file,'rb') as i: #\n with open(decode_out_file,'w') as o:\n #Header\n o.write('Time,Status\\n')\n while True:\n anteroom=i.read(buff_size)\n if anteroom==b'':\n break\n anteroom_tuple=struct.unpack('=If',anteroom)\n time_=anteroom_tuple[0]\n status=anteroom_tuple[1]\n if localtime:\n time_=time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time_))\n o.write('%s,%f\\n'%(time_,status))\n else:\n o.write('%i,%f\\n'%(time_,status))\n except FileNotFoundError:\n continue\n if draw:\n actogram(template_filename, n_pir, bin_display)",
"def generate_position_label_data_matrix():\n def generate_position_label_data_matrix_All_label():\n position_label = OrderedDict()\n with open(\"%s/All_label_final_sorted_header.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading All label positions file: %s/All_label_final_sorted_header.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n next(csv_reader, None)\n for row in csv_reader:\n position_label[row[0]] = row[1:]\n print \"Generating different list of Positions and heatmap data matrix... \\n\"\n f1=open(\"%s/Only_ref_variant_positions_for_closely\" % args.filter2_only_snp_vcf_dir, 'w+')\n f2=open(\"%s/Only_ref_variant_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n f3=open(\"%s/Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n f4=open(\"%s/Only_filtered_positions_for_closely_matrix_TRUE_variants_filtered_out.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f1.write('\\t' + print_string_header.strip() + '\\n')\n f2.write('\\t' + print_string_header.strip() + '\\n')\n f3.write('\\t' + print_string_header.strip() + '\\n')\n f4.write('\\t' + print_string_header.strip() + '\\n')\n for value in position_label:\n lll = ['0', '2', '3', '4', '5', '6', '7']\n ref_var = ['1', '1TRUE']\n if set(ref_var) & set(position_label[value]):\n if set(lll) & set(position_label[value]):\n print_string = \"\"\n for i in position_label[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f3.write(STRR2)\n if position_label[value].count('1TRUE') >= 2:\n f4.write('1\\n')\n else:\n f4.write('0\\n')\n else:\n strr = value + \"\\n\"\n f1.write(strr)\n STRR3 =\tvalue +\t\"\\t\" + str(position_label[value]) + \"\\n\"\n f2.write(STRR3)\n f1.close()\n f2.close()\n f3.close()\n csv_file.close()\n\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/Only_ref_variant_positions_for_closely\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/Only_ref_variant_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/Only_filtered_positions_for_closely_matrix_TRUE_variants_filtered_out.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/1TRUE/-1/g' %s/Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n\n def temp_generate_position_label_data_matrix_All_label():\n\n \"\"\"\n Read **temp_label_final_raw.txt** SNP position label data matrix for generating barplot statistics.\n \"\"\"\n temp_position_label = OrderedDict()\n with open(\"%s/temp_label_final_raw.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading temporary label positions file: %s/temp_label_final_raw.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n next(csv_reader, None)\n for row in csv_reader:\n temp_position_label[row[0]] = row[1:]\n f33=open(\"%s/temp_Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n 
print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f33.write('\\t' + print_string_header.strip() + '\\n')\n for value in temp_position_label:\n lll = ['reference_unmapped_position', 'LowFQ', 'LowFQ_DP', 'LowFQ_QUAL', 'LowFQ_DP_QUAL', 'LowFQ_QUAL_DP', 'HighFQ_DP', 'HighFQ_QUAL', 'HighFQ_DP_QUAL', 'HighFQ_QUAL_DP', 'HighFQ', 'LowFQ_proximate_SNP', 'LowFQ_DP_proximate_SNP', 'LowFQ_QUAL_proximate_SNP', 'LowFQ_DP_QUAL_proximate_SNP', 'LowFQ_QUAL_DP_proximate_SNP', 'HighFQ_DP_proximate_SNP', 'HighFQ_QUAL_proximate_SNP', 'HighFQ_DP_QUAL_proximate_SNP', 'HighFQ_QUAL_DP_proximate_SNP', 'HighFQ_proximate_SNP', '_proximate_SNP']\n ref_var = ['reference_allele', 'VARIANT']\n if set(ref_var) & set(temp_position_label[value]):\n if set(lll) & set(temp_position_label[value]):\n print_string = \"\"\n for i in temp_position_label[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f33.write(STRR2)\n f33.close()\n csv_file.close()\n\n \"\"\"\n Read temp_Only_filtered_positions_for_closely_matrix file and generate a matrix of positions that are being filtered just because of FQ\n \"\"\"\n temp_position_label_FQ = OrderedDict()\n with open(\"%s/temp_Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading temporary Only_filtered_positions label file: %s/temp_Only_filtered_positions_for_closely_matrix.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n next(csv_reader, None)\n for row in csv_reader:\n temp_position_label_FQ[row[0]] = row[1:]\n f44=open(\"%s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f44.write('\\t' + print_string_header.strip() + '\\n')\n for value in temp_position_label_FQ:\n #lll = ['reference_unmapped_position', 'LowFQ', 'LowFQ_DP', 'LowFQ_QUAL', 'LowFQ_DP_QUAL', 'LowFQ_QUAL_DP', 'HighFQ_DP', 'HighFQ_QUAL', 'HighFQ_DP_QUAL', 'HighFQ_QUAL_DP', 'HighFQ', 'LowFQ_proximate_SNP', 'LowFQ_DP_proximate_SNP', 'LowFQ_QUAL_proximate_SNP', 'LowFQ_DP_QUAL_proximate_SNP', 'LowFQ_QUAL_DP_proximate_SNP', 'HighFQ_DP_proximate_SNP', 'HighFQ_QUAL_proximate_SNP', 'HighFQ_DP_QUAL_proximate_SNP', 'HighFQ_QUAL_DP_proximate_SNP', 'HighFQ_proximate_SNP', '_proximate_SNP']\n lll = ['LowFQ']\n #ref_var = ['reference_allele', 'VARIANT']\n if set(lll) & set(temp_position_label_FQ[value]):\n print_string = \"\"\n for i in temp_position_label_FQ[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f44.write(STRR2)\n f44.close()\n csv_file.close()\n\n ## Perform Sed\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_unmapped_position/0/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_allele/1/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/VARIANT/2/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 
's/LowFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ/3/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_FQ.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n \"\"\"\n Read temp_Only_filtered_positions_for_closely_matrix file and generate a matrix of positions that are being filtered just because of Dp\n \"\"\"\n temp_position_label_DP = OrderedDict()\n with open(\"%s/temp_Only_filtered_positions_for_closely_matrix.txt\" % args.filter2_only_snp_vcf_dir, 'rU') as csv_file:\n print \"Reading temporary Only_filtered_positions 
label file: %s/temp_Only_filtered_positions_for_closely_matrix.txt \\n\" % args.filter2_only_snp_vcf_dir\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n next(csv_reader, None)\n for row in csv_reader:\n temp_position_label_DP[row[0]] = row[1:]\n f44=open(\"%s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n print_string_header = \"\\t\"\n for i in vcf_filenames:\n print_string_header = print_string_header + os.path.basename(i) + \"\\t\"\n f44.write('\\t' + print_string_header.strip() + '\\n')\n for value in temp_position_label_DP:\n #lll = ['reference_unmapped_position', 'LowFQ', 'LowFQ_DP', 'LowFQ_QUAL', 'LowFQ_DP_QUAL', 'LowFQ_QUAL_DP', 'HighFQ_DP', 'HighFQ_QUAL', 'HighFQ_DP_QUAL', 'HighFQ_QUAL_DP', 'HighFQ', 'LowFQ_proximate_SNP', 'LowFQ_DP_proximate_SNP', 'LowFQ_QUAL_proximate_SNP', 'LowFQ_DP_QUAL_proximate_SNP', 'LowFQ_QUAL_DP_proximate_SNP', 'HighFQ_DP_proximate_SNP', 'HighFQ_QUAL_proximate_SNP', 'HighFQ_DP_QUAL_proximate_SNP', 'HighFQ_QUAL_DP_proximate_SNP', 'HighFQ_proximate_SNP', '_proximate_SNP']\n lll = ['HighFQ_DP']\n #ref_var = ['reference_allele', 'VARIANT']\n if set(lll) & set(temp_position_label_FQ[value]):\n print_string = \"\"\n for i in temp_position_label_FQ[value]:\n print_string = print_string + \"\\t\" + i\n STRR2 = value + print_string + \"\\n\"\n f44.write(STRR2)\n f44.close()\n csv_file.close()\n\n\n #Perform Sed\n subprocess.call([\"sed -i 's/_filter2_final.vcf_no_proximate_snp.vcf//g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_unmapped_position/0/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/reference_allele/1/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/VARIANT/2/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 
's/HighFQ_QUAL_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_proximate_SNP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL_DP/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_QUAL/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ_DP/3/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/LowFQ/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n subprocess.call([\"sed -i 's/HighFQ/4/g' %s/temp_Only_filtered_positions_for_closely_matrix_DP.txt\" % args.filter2_only_snp_vcf_dir], shell=True)\n\n\n \"\"\"\n Read each Sample columns and calculate the percentage of each label to generate barplot statistics.\n This will give a visual explanation of how many positions in each samples were filtered out because of different reason\n \"\"\"\n\n c_reader = csv.reader(open('%s/temp_Only_filtered_positions_for_closely_matrix.txt' % args.filter2_only_snp_vcf_dir, 'r'), delimiter='\\t')\n columns = list(zip(*c_reader))\n counts = 1\n end = len(vcf_filenames) + 1\n f_bar_count = open(\"%s/bargraph_counts.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n f_bar_perc = open(\"%s/bargraph_percentage.txt\" % args.filter2_only_snp_vcf_dir, 'w+')\n f_bar_count.write(\"Sample\\tunmapped_positions\\treference_allele\\ttrue_variant\\tOnly_low_FQ\\tOnly_DP\\tOnly_low_MQ\\tother\\n\")\n f_bar_perc.write(\"Sample\\tunmapped_positions_perc\\ttrue_variant_perc\\tOnly_low_FQ_perc\\tOnly_DP_perc\\tOnly_low_MQ_perc\\tother_perc\\n\")\n for i in xrange(1, end, 1):\n \"\"\" Bar Count Statistics: Variant Position Count Statistics \"\"\"\n true_variant = columns[i].count('VARIANT')\n unmapped_positions = columns[i].count('reference_unmapped_position')\n reference_allele = columns[i].count('reference_allele')\n Only_low_FQ = columns[i].count('LowFQ')\n Only_DP = columns[i].count('HighFQ_DP')\n Only_low_MQ = columns[i].count('HighFQ')\n low_FQ_other_parameters = columns[i].count('LowFQ_QUAL_DP_proximate_SNP') + columns[i].count('LowFQ_DP_QUAL_proximate_SNP') + columns[i].count('LowFQ_QUAL_proximate_SNP') + columns[i].count('LowFQ_DP_proximate_SNP') + columns[i].count('LowFQ_proximate_SNP') + columns[i].count('LowFQ_QUAL_DP') + columns[i].count('LowFQ_DP_QUAL') + columns[i].count('LowFQ_QUAL') + columns[i].count('LowFQ_DP')\n high_FQ_other_parameters = columns[i].count('HighFQ_QUAL_DP_proximate_SNP') + 
columns[i].count('HighFQ_DP_QUAL_proximate_SNP') + columns[i].count('HighFQ_QUAL_proximate_SNP') + columns[i].count('HighFQ_DP_proximate_SNP') + columns[i].count('HighFQ_proximate_SNP') + columns[i].count('HighFQ_QUAL_DP') + columns[i].count('HighFQ_DP_QUAL') + columns[i].count('HighFQ_QUAL')\n other = low_FQ_other_parameters + high_FQ_other_parameters\n total = true_variant + unmapped_positions + reference_allele + Only_low_FQ + Only_DP + low_FQ_other_parameters + high_FQ_other_parameters + Only_low_MQ\n filename_count = i - 1\n bar_string = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (os.path.basename(vcf_filenames[filename_count].replace('_filter2_final.vcf_no_proximate_snp.vcf', '')), unmapped_positions, reference_allele, true_variant, Only_low_FQ, Only_DP, Only_low_MQ, other)\n f_bar_count.write(bar_string)\n\n \"\"\" Bar Count Percentage Statistics: Variant Position Percentage Statistics \"\"\"\n try:\n true_variant_perc = float((columns[i].count('VARIANT') * 100) / total)\n except ZeroDivisionError:\n true_variant_perc = 0\n try:\n unmapped_positions_perc = float((columns[i].count('reference_unmapped_position') * 100) / total)\n except ZeroDivisionError:\n unmapped_positions_perc = 0\n try:\n reference_allele_perc = float((columns[i].count('reference_allele') * 100) / total)\n except ZeroDivisionError:\n reference_allele_perc = 0\n try:\n Only_low_FQ_perc = float((columns[i].count('LowFQ') * 100) / total)\n except ZeroDivisionError:\n Only_low_FQ_perc = 0\n try:\n Only_DP_perc = float((columns[i].count('HighFQ_DP') * 100) / total)\n except ZeroDivisionError:\n Only_DP_perc = 0\n try:\n Only_low_MQ_perc = float((columns[i].count('HighFQ') * 100) / total)\n except ZeroDivisionError:\n Only_low_MQ_perc = 0\n try:\n low_FQ_other_parameters_perc = float(((columns[i].count('LowFQ_QUAL_DP_proximate_SNP') + columns[i].count('LowFQ_DP_QUAL_proximate_SNP') + columns[i].count('LowFQ_QUAL_proximate_SNP') + columns[i].count('LowFQ_DP_proximate_SNP') + columns[i].count('LowFQ_proximate_SNP') + columns[i].count('LowFQ_QUAL_DP') + columns[i].count('LowFQ_DP_QUAL') + columns[i].count('LowFQ_QUAL') + columns[i].count('LowFQ_DP')) * 100) / total)\n except ZeroDivisionError:\n low_FQ_other_parameters_perc = 0\n try:\n high_FQ_other_parameters_perc = float(((columns[i].count('HighFQ_QUAL_DP_proximate_SNP') + columns[i].count('HighFQ_DP_QUAL_proximate_SNP') + columns[i].count('HighFQ_QUAL_proximate_SNP') + columns[i].count('HighFQ_DP_proximate_SNP') + columns[i].count('HighFQ_proximate_SNP') + columns[i].count('HighFQ_QUAL_DP') + columns[i].count('HighFQ_DP_QUAL') + columns[i].count('HighFQ_QUAL')) * 100) / total)\n except ZeroDivisionError:\n high_FQ_other_parameters_perc = 0\n\n other_perc = float(low_FQ_other_parameters_perc + high_FQ_other_parameters_perc)\n bar_perc_string = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (os.path.basename(vcf_filenames[filename_count].replace('_filter2_final.vcf_no_proximate_snp.vcf', '')), unmapped_positions_perc, true_variant_perc, Only_low_FQ_perc, Only_DP_perc, Only_low_MQ_perc, other_perc)\n f_bar_perc.write(bar_perc_string)\n\n \"\"\" Methods \"\"\"\n generate_position_label_data_matrix_All_label()\n temp_generate_position_label_data_matrix_All_label()",
"def _dta_obj_to_file(self, address):\n global get_missing\n \n type_dict = {\n 251: ['b',1],\n 252: ['h',2], \n 253: ['l',4],\n 254: ['f',4],\n 255: ['d',8]\n }\n first_missing = {\n 251: 101,\n 252: 32741,\n 253: 2147483620, \n 254: float.fromhex('0x1.0p+127'),\n 255: float.fromhex('0x1.0p+1023')\n }\n typlist = self._typlist\n nvar = self._nvar\n \n missing_save_val = self._missing_save_val\n \n def write_value_label_table(labname, table):\n # Stata limits are a bit confusing. Total length of text \n # (including null terminators) must be <= 32000? Total \n # number of vals must be <= 65536? But the limit on text \n # length forces no. of vals <= 16000 since each label must \n # occupy at least two bytes (including null terminator).\n \n labname = labname[:32]\n \n val = sorted(table.keys())\n # each value may be up to 81 chars including null\n txt = [table[v][:80] for v in val] \n \n nval = len(val)\n if nval > 65536: # max number of values allowed\n val = val[:65536]\n txt = txt[:65536]\n nval = 65536\n \n off = [0]\n for i in range(nval - 1):\n # in next line, \"+ 1\" to leave room for \\0\n offset = off[i] + len(txt[i]) + 1\n if offset > 32000: # if too much text\n off = off[:i] # cut off at before the ith one\n val = val[:i]\n txt = txt[:i]\n nval = i\n break\n off.append(offset)\n txt_len = off[-1] + len(txt[-1]) + 1\n \n table_len = 4 + 4 + 4*nval + 4*nval + txt_len\n \n dta.write(pack(byteorder + \"l\", table_len))\n dta.write(bytearray(labname, 'iso-8859-1') +\n b'\\0'*(33-len(labname)))\n dta.write(b'\\x00\\x00\\x00')\n \n dta.write(pack(byteorder + \"l\", nval))\n dta.write(pack(byteorder + \"l\", txt_len))\n for o in off: dta.write(pack(byteorder + \"l\", o))\n for v in val: dta.write(pack(byteorder + \"l\", v))\n #for t in txt: write_byte_str((t,), len(t) + 1)\n for t in txt: dta.write(bytearray(t, 'iso-8859-1') + b'\\0')\n \n with open(address, 'wb') as dta:\n # header\n dta.write(pack('b', 115)) # ds_format\n byteorder = self._byteorder\n dta.write(pack('b', 1 if byteorder == '>' else 2)) # byteorder\n dta.write(pack('b', 1)) # filetype\n dta.write(pack('b', 0)) # padding\n dta.write(pack(byteorder + 'h', self._nvar))\n dta.write(pack(byteorder + 'i', self._nobs))\n data_label = self._data_label[:80]\n dta.write(bytearray(data_label, 'iso-8859-1') +\n b'\\0'*(81-len(data_label)))\n self._set_timestamp() # new time_stamp\n time_stamp = self._time_stamp[:17]\n dta.write(bytearray(time_stamp, 'iso-8859-1') +\n b'\\0'*(18-len(time_stamp)))\n \n # descriptors\n dta.write(bytes(self._typlist))\n for name in self._varlist:\n name = name[:32]\n dta.write(bytearray(name, 'iso-8859-1') + b'\\0'*(33-len(name)))\n # In srtlist, Nones are replaced with zeroes and \n # a terminating zero is appended (the file needs \n # nvar + 1 ints including terminating zero).\n srtlist = self._srtlist + [None]\n srtlist = [srt + 1 if srt is not None else 0 for srt in srtlist]\n dta.write(pack(byteorder + 'h'*(nvar + 1), *srtlist))\n for fmt in self._fmtlist:\n fmt = fmt[:48]\n dta.write(bytearray(fmt, 'iso-8859-1') + b'\\0'*(49-len(fmt)))\n for lab in self._lbllist:\n lab = lab[:32]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(33-len(lab)))\n \n # variable labels\n for lab in self._vlblist:\n lab = lab[:80]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(81-len(lab)))\n \n # characteristics\n chrdict = self._chrdict\n for varname in chrdict:\n varname = varname[:32]\n vardict = chrdict[varname]\n for charname in vardict:\n charname = charname[:32]\n char = vardict[charname][:67784] # or 
8681 for Small Stata\n data_len = 66 + len(char) + 1 # +1 for null termination\n dta.write(b'\\x01') # data_type\n dta.write(pack(byteorder + 'i', data_len))\n dta.write(bytearray(varname, 'iso-8859-1') + \n b'\\0'*(33 - len(varname)))\n dta.write(bytearray(charname, 'iso-8859-1') + \n b'\\0'*(33 - len(charname)))\n dta.write(bytearray(char, 'iso-8859-1') + b'\\0')\n dta.write(b'\\x00\\x00\\x00\\x00\\x00')\n \n # data\n for row in self._varvals:\n for value, st_type in zip(row, typlist):\n if st_type <= 244:\n dta.write(bytearray(value, 'iso-8859-1') + \n b'\\0'*(st_type - len(value)))\n else:\n fmt, nbytes = type_dict[st_type]\n # Get correct dta value if missing. As a safety, check\n # for non-standard missing (None and large values).\n if value is None:\n value = first_missing[st_type]\n elif isinstance(value, MissingValue):\n value = missing_save_val(value, st_type)\n elif (value > 8.988465674311579e+307 or \n value < -1.7976931348623157e+308):\n # is this the right way to handle this ?\n value = missing_save_val(\n get_missing(value), st_type) \n dta.write(pack(byteorder + fmt, value))\n \n # value labels\n value_labels = self._vallabs\n for labname in value_labels.keys():\n write_value_label_table(labname, value_labels[labname])",
"def write_shortfile_table(self):\n\n # KMEL actually removes duplicate short filenames from this\n # table.\n\n start_of_shortfiles = self.db_file.tell()\n\n shortfiles = {}\n for miEntry in self.mainIndex:\n short_filename = miEntry.encodedShortfile\n if short_filename in shortfiles:\n miEntry.set_shortfile_offset(\n shortfiles[short_filename])\n else:\n shortfiles[short_filename] = \\\n self.db_file.tell() - start_of_shortfiles\n\n miEntry.set_shortfile_offset(\n shortfiles[short_filename])\n self.db_file.write(short_filename)",
"def save_to_arc(self, filename, header = True, comment = None):\n if header:\n F = open( filename, 'w' )\n F.write( \"!BIOSYM archive 2\\n\" )\n if comment is not None:\n F.write( '!%s\\n'%comment )\n F.write( \"PBC=ON\\n\" )\n else:\n F = open( filename, 'a' )\n \n #FIXME: If you think this is the ugliest python code you've ever seen,\n # you are quite right! It is literal translation of some old AWK script.\n # But it works for now, so... \n\n unit_cell = self.unit_cell\n a=sqrt(unit_cell[0,0]*unit_cell[0,0]+\n unit_cell[0,1]*unit_cell[0,1]+\n unit_cell[0,2]*unit_cell[0,2])\n b=sqrt(unit_cell[1,0]*unit_cell[1,0]+\n unit_cell[1,1]*unit_cell[1,1]+\n unit_cell[1,2]*unit_cell[1,2])\n c=sqrt(unit_cell[2,0]*unit_cell[2,0]+\n unit_cell[2,1]*unit_cell[2,1]+\n unit_cell[2,2]*unit_cell[2,2])\n alpha=(unit_cell[1,0]*unit_cell[2,0]+\n unit_cell[1,1]*unit_cell[2,1]+\n unit_cell[1,2]*unit_cell[2,2])/(b*c)\n beta =(unit_cell[0,0]*unit_cell[2,0]+\n unit_cell[0,1]*unit_cell[2,1]+\n unit_cell[0,2]*unit_cell[2,2])/(a*c)\n gamma=(unit_cell[0,0]*unit_cell[1,0]+\n unit_cell[0,1]*unit_cell[1,1]+\n unit_cell[0,2]*unit_cell[1,2])/(a*b)\n alpha=math.atan2(sqrt(1-alpha*alpha),alpha)\n beta =math.atan2(sqrt(1-beta *beta ),beta )\n gamma=math.atan2(sqrt(1-gamma*gamma),gamma)\n\n transf=zeros((3,3))\n transf[0,0]=a\n transf[1,0]=0.0\n transf[2,0]=0.0\n transf[0,1]=b*cos(gamma)\n transf[1,1]=b*sin(gamma)\n transf[2,1]=0.0\n transf[0,2]=c*cos(beta)\n transf[1,2]=c*(cos(alpha)-(cos(gamma)*cos(beta)))/sin(gamma)\n transf[2,2]=sqrt(c*c-transf[0,2]*transf[0,2]-transf[1,2]*transf[1,2])\n\n alpha=180*alpha/pi\n beta =180* beta/pi\n gamma=180*gamma/pi\n\n recip_cell = self.recip_cell.T\n frac_pos = zeros(self.atoms.shape)\n positions= zeros(self.atoms.shape)\n for i in range(self.num_atoms):\n for j in range(3):\n frac_pos[i,j]=0.\n for k in range(3):\n frac_pos[i,j]+=self.atoms[i,k]*recip_cell[j,k]\n for j in range(3):\n positions[i,j] = 0.\n for k in range(3):\n positions[i,j]+=frac_pos[i,k]*transf[j,k]\n\n try:\n F.write( '%80.6f\\n'%self.energy )\n except:\n F.write( '\\n' )\n F.write( '!DATE\\n' )\n F.write( 'PBC %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n'%( a, b, c, alpha, beta, gamma ) )\n \n for i in range(self.num_atoms):\n F.write( '%2s %-13.9f %-13.9f %-13.9f CORE %4d %2s %2s %6.4f %4d\\n'%(\n self.species[i], positions[i,0], positions[i,1], positions[i,2],i,\n self.species[i], self.species[i], 0, i ) )\n F.write( 'end\\n' )\n F.write( 'end\\n' )",
"def generate_constraint_file(pdb_object):\n complex = pdb_object.complex.pdb.read()\n output_path = pdb_object.constraints.path\n metals = complex.select(constants.metal_selector)\n results = []\n if metals:\n for atom in metals:\n pos = atom.getCoords()\n close_ligand = complex.select(constants.close_ligand_selector, t=pos)\n if close_ligand:\n for close in close_ligand:\n results.append((atom.getName(),\n str(atom.getResnum())+atom.getChid(),\n close.getName(),\n str(close.getResnum())+close.getChid()))\n pdb_object.constraints.write(\n [f\"AtomPair {r[0]} {r[1]} {r[2]} {r[3]} SQUARE_WELL 2.5 -2000\\n\" for r in results])",
"def compress(file, special_character='%'):\n\n def check_size_address(addr_output, curr_size, output):\n addr_size = size_in_bits(addr_output)\n delta_size = addr_size - curr_size\n\n if delta_size > 0:\n addr_special_character = dico.index(special_character)\n df.loc[df_i - 1]['Output'] = \\\n f\"@[{special_character * delta_size}]=\" \\\n f\"{addr_special_character * delta_size}\"\n\n for i in range(delta_size):\n output += write_addr_n_bits(addr_special_character, curr_size)\n\n return delta_size, output\n\n dico = make_dico(file, special_character)\n\n addr = len(dico)\n output = ''\n df = pd.DataFrame(columns=['Buffer', 'Input', 'New sequence', 'Address',\n 'Output'])\n\n curr_size = size_in_bits(addr - 1)\n df_i = 0\n buffer = ''\n for input in file: # foreach input character in the file\n\n new_seq = buffer + input\n if new_seq in dico:\n df.loc[df_i] = [buffer, input, '', '', '']\n buffer = new_seq\n else: # the new sequence does not exist in the dictionary\n # Update dico\n dico.append(new_seq)\n output_local = new_seq[:-1] # all except last character\n addr_output = dico.index(output_local)\n\n # Check size of the address\n delta_size, output = check_size_address(addr_output, curr_size,\n output)\n if delta_size > 0:\n curr_size += delta_size\n\n # update table\n output_local = f\"@[{output_local}]={addr_output}\"\n df.loc[df_i] = [buffer, input, new_seq, addr, output_local]\n addr += 1\n buffer = new_seq[-1]\n\n # Update output\n output += write_addr_n_bits(addr_output, curr_size)\n\n df_i += 1\n\n # no more characters. Empty the buffer now\n assert (buffer in dico)\n\n addr_output = dico.index(buffer)\n delta_size, output = check_size_address(addr_output, curr_size, output)\n if delta_size > 0:\n curr_size += delta_size\n\n output_local = f\"@[{buffer}]={addr_output}\"\n df.loc[df_i] = [buffer, '', '', '', output_local]\n df_i += 1\n output += write_addr_n_bits(addr_output, curr_size)\n\n return output, df, dico",
"def Export_Aux(self, size):\r\n class SimpleBond():\r\n def __init__(self, pos1, pos2, jMatrix, anisotropy1, anisotropy2, spinMag1, spinMag2):\r\n self.pos1 = pos1\r\n self.anisotropy1 = anisotropy1\r\n self.pos2 = pos2\r\n self.anisotropy2 = anisotropy2\r\n self.jMatrix = jMatrix\r\n self.spinMag1 = spinMag1\r\n self.spinMag2 = spinMag2\r\n \r\n def sameBond(self, bond2):\r\n if self.pos1 == bond2.pos1 or self.pos1 == bond2.pos2:\r\n if self.pos2 == bond2.pos2 or self.pos2 == bond2.pos1:\r\n return True\r\n return False\r\n \r\n Na = self.getCutoffCell().getNa()\r\n Nb = self.getCutoffCell().getNb()\r\n Nc = self.getCutoffCell().getNc()\r\n \r\n class SimpleAtom():\r\n def __init__(self, pos, anisotropy, spinMag, label, massNum, cellNum, valence):\r\n self.anisotropy = anisotropy\r\n self.pos = pos\r\n# self.cellPos = []\r\n# self.cellPosX = int(pos[0])/Na\r\n# self.cellPosY = int(pos[1])/Nb\r\n# self.cellPosZ = int(pos[2])/Nc\r\n self.interactions = []\r\n self.interCellInteractions = []#these must be translated with different logic and therefore must be kept separate\r\n #self.interactions[position of other atom] = j number\r\n self.spinMag = spinMag\r\n self.label = label\r\n self.massNum = massNum\r\n self.cellNum = cellNum\n #valence is an int, not str\r\n #self.valence = string.join(valence.split(), \"\")#just in case, get rid of whitespace\n self.valence = valence\r\n \r\n #might want to change this to position later when all atoms wont be created in same list\r\n def addInteraction(self, atom2, jMat):\r\n# self.interactions[atom2] = jMat\r\n self.interactions.append((atom2, jMat))\r\n \r\n def addInterCellInteraction(self, atom2, jMat, direction):\r\n \"\"\"Direction is in form (bool, bool, bool) for (x,y,z)\"\"\"\r\n #Check for repeats (necessary for method used to translate these bonds)\r\n for interaction in self.interCellInteractions:\r\n if interaction[0] == atom2 and interaction[1] == jMat:\r\n return #contains this interaction already\r\n self.interCellInteractions.append((atom2, jMat, direction))\r\n \r\n def __eq__(self, other):\r\n if self.pos[0] == other.pos[0]:\r\n if self.pos[1] == other.pos[1]:\r\n if self.pos[2] == other.pos[2]:\r\n return True\r\n return False\r\n \r\n \r\n class SimpleBondList():\r\n def __init__(self):\r\n self.list = []\r\n \r\n def addBond(self, bond):\r\n if not self.containsBond(bond):\r\n self.list.append(bond)\r\n# else:\r\n# print \"Duplicate Bonds!\" #should not get here\r\n \r\n def containsBond(self, bond):\r\n for eachBond in self.list:\r\n if eachBond.sameBond(bond):\r\n return True\r\n return False\r\n \r\n \r\n \r\n def contains(list, element):\r\n for item in list:\r\n if (item == element).all():\r\n return True\r\n return False\r\n \r\n def atomListContains(list, element):\r\n for item in list:\r\n if item == element:\r\n return True\r\n return False\r\n \r\n def indexOf(list, item):\r\n for i in range(len(list)):\r\n if (item == list[i]).all():\r\n return i\r\n return -1\r\n \r\n def translateToFirstCutoffCell(pos):\r\n \"\"\"Translates a position back to the first Cutoff cell.\"\"\"\r\n x = pos[0]\r\n y = pos[1]\r\n z = pos[2]\r\n \r\n while x >= Na:\r\n x = x - Na\r\n \r\n while y >= Nb:\r\n y = y - Nb\r\n \r\n while z >= Nc:\r\n z = z - Nc\r\n \r\n return (x,y,z)\r\n\r\n\r\n \r\n #Create list of matrices\r\n matrices = []\r\n for bond in self.getCutoffCell().getBonds():\r\n# pos1 = bond.getAtom1().getPosition()\r\n# pos2 = bond.getAtom2().getPosition()\r\n jMat = bond.getJMatrix()\r\n# count = matrices.count(jMat)\r\n if not 
contains(matrices, jMat):\r\n matrices.append(jMat)\r\n \r\n \r\n #create simple bonds within cutoff cell\r\n simpleCellBonds = []\r\n for bond in self.getCutoffCell().getBonds():\r\n pos1 = bond.getAtom1().getPosition()\r\n anisotropy1 = bond.getAtom1().getAnisotropy()\r\n spin1 = bond.getAtom1().getSpinMagnitude()\r\n# print anisotropy1\r\n pos2 = bond.getAtom2().getPosition()\r\n anisotropy2 = bond.getAtom2().getAnisotropy()\r\n spin2 = bond.getAtom2().getSpinMagnitude()\r\n# print anisotropy2\r\n# time.sleep(.1)\r\n jMat = bond.getJMatrix()\r\n newBond = SimpleBond(pos1, pos2, indexOf(matrices,jMat), anisotropy1, anisotropy2, spin1, spin2)\r\n simpleCellBonds.append(newBond)\r\n \r\n \r\n def PosInFirstCutoff(pos):\r\n return (pos[0] < Na and pos[1] < Nb and pos[2] < Nc)\r\n \r\n \r\n cellAtoms = []\r\n cellBonds = SimpleBondList()\r\n interCellBonds = SimpleBondList()#bonds between cells cannot be translated\r\n #with the same index arithmetic because of edges\r\n for bond in simpleCellBonds:\r\n pos1 = bond.pos1\r\n anisotropy1 = bond.anisotropy1\r\n pos2 = bond.pos2\r\n anisotropy2 = bond.anisotropy2\r\n spin1 = bond.spinMag1\r\n spin2 = bond.spinMag2\r\n #--Adding labels, cell number, valence, and mass number----\r\n a1 = self.MagCell.atomAtPosition(pos1)\r\n a2 = self.MagCell.atomAtPosition(pos2)\r\n #label1 = a1.description\r\n #label2 = a2.description\n label1 = a1.getSymbol()\n label2 = a2.getSymbol()\r\n #cellNum1 = a1.getIndexNumber()\r\n #cellNum2 = a2.getIndexNumber()\n cellNum1 = a1.getIDNum()\n cellNum2 = a2.getIDNum()\r\n massNum1 = a1.getMassNum()\r\n massNum2 = a2.getMassNum()\r\n valence1 = a1.getValence()\r\n valence2 = a2.getValence()\r\n #---------------------------------------------------\r\n jMatInt = bond.jMatrix\r\n for i in range(2):\r\n for j in range(2):\r\n for k in range(2):\r\n for a in range(Na):\r\n for b in range(Nb):\r\n for c in range(Nc):\r\n x1 = pos1[0] + a + (Na * i)\r\n y1 = pos1[1] + b + (Nb * j)\r\n z1 = pos1[2] + c + (Nc * k)\r\n \r\n x2 = pos2[0] + a + (Na * i)\r\n y2 = pos2[1] + b + (Nb * j)\r\n z2 = pos2[2] + c + (Nc * k) \r\n newPos1 = (x1,y1,z1)\r\n newPos2 = (x2,y2,z2)\r\n \r\n #It is possible for an atom to be bonded to an atom which was\r\n #translated from a non-bonded atom in the original unit cell.\r\n #This non-bonded atom which translates into bonded atom(s) must\r\n #be included in the cellAtoms list, or it will not be translated\r\n #through the allAtoms list to create the other atoms which are\r\n #bonded, and hence, must be included.\r\n \r\n if PosInFirstCutoff(newPos1) or PosInFirstCutoff(newPos2): \r\n if PosInFirstCutoff(newPos1) and PosInFirstCutoff(newPos2):\r\n #Both are in first cutoff\r\n #Add the atoms to the list of atoms within the first cell\r\n newAtom1 = SimpleAtom(newPos1, anisotropy1, spin1, label1, massNum1, cellNum1, valence1)\r\n if not atomListContains(cellAtoms, newAtom1):\r\n cellAtoms.append(newAtom1)\r\n newAtom2 = SimpleAtom(newPos2, anisotropy2, spin2, label2, massNum2, cellNum2, valence2)\r\n if not atomListContains(cellAtoms, newAtom2):\r\n cellAtoms.append(newAtom2)\r\n #Add the bond to bonds within the cell\r\n bond = SimpleBond( (x1,y1,z1), (x2,y2,z2), jMatInt , None, None, None, None)\r\n cellBonds.addBond(bond)\r\n else:#It is an inter-cellular bond\r\n bond = SimpleBond( (x1,y1,z1), (x2,y2,z2), jMatInt, None, None, None, None)\r\n interCellBonds.addBond(bond)\r\n #If the atom is in the first cutoff cell then it must be added and\r\n #translating the position will do nothing. 
If it is not in the first cutoff\r\n #cell, then the corresponding atom in the first cutoff cell must be added\r\n #to create this one through translation\r\n transPos1 = translateToFirstCutoffCell(newPos1)\r\n transPos2 = translateToFirstCutoffCell(newPos2)\r\n \r\n newAtom1 = SimpleAtom(transPos1, anisotropy1, spin1, label1, massNum1, cellNum1, valence1)\r\n newAtom2 = SimpleAtom(transPos2, anisotropy2, spin2, label2, massNum2, cellNum2, valence2)\r\n if not atomListContains(cellAtoms, newAtom1):\r\n cellAtoms.append(newAtom1)\r\n if not atomListContains(cellAtoms, newAtom2):\r\n cellAtoms.append(newAtom2)\r\n \r\n print \"after iterating through bonds...\"\n print \"atoms:\"\n for atom in cellAtoms:\n print atom.pos\n print \"cellBonds: \"\n for b in cellBonds.list:\n print b.pos1, \" , \", b.pos2\n print \"interCelBbonds: \"\n for b in interCellBonds.list:\n print b.pos1, \" , \", b.pos2\n \r\n #symmetry equivalent bonds between unit cells will not be represented in\r\n #the cutoff cell if the cutoff cell is only one unit cell wide in any\r\n #dimension which would include these inter-cellular bonds\r\n if size > 1 and (Na == 1 or Nb == 1 or Nc == 1):\r\n for bond in simpleCellBonds:\r\n xyz = bond.pos1\r\n xyz2 = bond.pos2\r\n \r\n #one of the two atoms should be in the first unit cell\r\n if(xyz[0] < 1 and xyz[1] < 1 and xyz[2] < 1) or (xyz2[0] < 1 and xyz2[1] < 1 and xyz2[2] < 1):\r\n \r\n for symop in self.MagCell.space_Group.iter_symops():\r\n # operate on coordinates in non-shifted spacegroup\r\n pos1 = symop(xyz)\r\n pos2 = symop(xyz2)\r\n \r\n mask1 = numpy.logical_or(pos1 < 0.0, pos1 >= 1.0)\r\n translation = numpy.floor(pos1[mask1]) #translates the first atom back to cell at (0,0,0)\r\n pos1[mask1] -= translation\r\n pos2[mask1] -= translation #Uses same translation to translate other atom\r\n \r\n \r\n #translate new Bond by 1 cell in each direction so all\r\n #translations of intercellular bonds are represented.\r\n \r\n \r\n #iterate through each translation and check if there are atoms there that could\r\n #be bonded; if so, add the bond\r\n for i in range(0, 2): #translate in x direction (Na - Cell X position) times\r\n for j in range(0, 2): #translate in y direction (Nb - Cell Y position) times\r\n for k in range(0, 2): #translate in z direction (Nc - Cell Z position) times\r\n translatedPos1 = [i + pos1[0],j + pos1[1],k + pos1[2]]\r\n translatedPos2 = [i + pos2[0],j + pos2[1],k + pos2[2]]\r\n \r\n \r\n #Check if the bond crosses the border(s) of the dimension of only one unit cell.\r\n #Check if the bond exists in intercellular bonds, and if not, add it? 
redundant ^?\r\n #Then check if the aotm that is in the cutoff cell is represented in cellAtoms, and\r\n #if not, add it.\r\n \r\n #If the bond crosses a dimension of size 1 unit cell\r\n if ((Na == 1) and (int(translatedPos1[0]) != int(translatedPos2[0]) or translatedPos2[0] < 0)) or ((Nb == 1) and (int(translatedPos1[1]) != int(translatedPos2[1]) or translatedPos2[1] < 0)) or ((Nc == 1) and (int(translatedPos1[2]) != int(translatedPos2[2]) or translatedPos2[2] < 0)):\r\n \r\n #Add the atom in the cutoff Cell and add the bond to intercellular bonds\r\n print translatedPos1, translatedPos2\r\n atomObj1 = self.MagCell.atomAtPosition(translatedPos1)\r\n atomObj2 = self.MagCell.atomAtPosition(translatedPos2)\r\n if(atomObj1 != None):#Add the atom if it is in the cutoff cell\r\n newAtom1 = SimpleAtom(translatedPos1, atomObj1.anisotropy, atomObj1.spinMagnitude, atomObj1.description, atomObj1.getMassNum(), atomObj1.getIDNum(), atomObj1.valence)\r\n #Add atom if there is not already an atom at that position\r\n if not atomListContains(cellAtoms, newAtom1):\r\n cellAtoms.append(newAtom1)\r\n \r\n if(atomObj2 != None):#Add the atom if it is in the cutoff cell\r\n newAtom2 = SimpleAtom(translatedPos2, atomObj2.anisotropy, atomObj2.spinMagnitude, atomObj2.description, atomObj2.getMassNum(), atomObj2.getIDNum(), atomObj2.valence)\r\n #Add atom if there is not already an atom at that position\r\n if not atomListContains(cellAtoms, newAtom2):\r\n cellAtoms.append(newAtom2)\r\n \r\n #If one of the atoms are in the cutoff cell and both have positive coordinates, add the bond\r\n if(atomObj1 != None) or atomObj2 != None:\r\n if(translatedPos1[0] >= 0 and translatedPos1[1] >= 0 and translatedPos1[2] >= 0 and translatedPos2[0] >= 0 and translatedPos2[1] >= 0 and translatedPos2[2] >= 0):\r\n interCellBonds.addBond(SimpleBond(translatedPos1, translatedPos2, bond.jMatrix, None, None, None, None))\r\n \r\n \r\n print \"after iterating through symops...\"\n print \"atoms:\"\n for atom in cellAtoms:\n print atom.pos\n print \"cellBonds: \"\n for b in cellBonds.list:\n print b.pos1, \" , \", b.pos2\n print \"interCelBbonds: \"\n for b in interCellBonds.list:\n print b.pos1, \" , \", b.pos2\r\n allAtoms = []\r\n numAtomsPerCell = len(cellAtoms)\r\n \r\n print \"atoms in cell: \", numAtomsPerCell\r\n \r\n for i in range(size):\r\n for j in range(size):\r\n for k in range(size):\r\n for index in range(len(cellAtoms)):\r\n pos = cellAtoms[index].pos\r\n anisotropy = cellAtoms[index].anisotropy\r\n x = pos[0] + (Na * i)\r\n y = pos[1] + (Nb * j)\r\n z = pos[2] + (Nc * k)\r\n newAtom = SimpleAtom((x,y,z), anisotropy, cellAtoms[index].spinMag, cellAtoms[index].label, cellAtoms[index].massNum, cellAtoms[index].cellNum, cellAtoms[index].valence)\r\n# print (len(allAtoms)%numAtomsPerCell == index)#just a check, should always be true\r\n allAtoms.append(newAtom)\r\n\r\n \r\n #for atom in allAtoms:\r\n # print atom.pos\r\n \r\n #Add bonds cellBonds to allAtoms (currently not most efficient way, but its a short list)\r\n for bond in cellBonds.list:\r\n #Make sure each position is not yet represented the easy but inefficient way\r\n #It's not actually that inefficient since it should only go through a fraction of the list\r\n pos1 = bond.pos1\r\n pos2 = bond.pos2\r\n pos1Index = -1\r\n pos2Index = -1\r\n for i in range(len(allAtoms)):\r\n currentPos = allAtoms[i].pos\r\n #if currentPos == pos1:\r\n if currentPos[0] == pos1[0] and currentPos[1] == pos1[1] and currentPos[2] == pos1[2]:\r\n pos1Index = i\r\n break\r\n \r\n for i in 
range(len(allAtoms)):\r\n currentPos = allAtoms[i].pos \r\n #if currentPos == pos2:\r\n if currentPos[0] == pos2[0] and currentPos[1] == pos2[1] and currentPos[2] == pos2[2]:\r\n pos2Index = i\r\n break\r\n \r\n if pos1Index < 0 or pos2Index < 0:\r\n print \"Atom list does not contain all atoms!\"\r\n if pos1Index < 0:\r\n print pos1, \" missing\"\r\n if pos2Index < 0:\r\n print pos2, \" missing\"\r\n raise Exception(\"Export Failed\")\r\n else:\r\n allAtoms[pos1Index].addInteraction(pos2Index, bond.jMatrix)\r\n allAtoms[pos2Index].addInteraction(pos1Index, bond.jMatrix)\r\n\r\n\r\n def bondDirection(pos1, pos2):\r\n xCell1 = int(pos1[0]/Na) #Find the cutoff cell\r\n xCell2 = int(pos2[0]/Na)\r\n yCell1 = int(pos1[1]/Nb)\r\n yCell2 = int(pos2[1]/Nb)\r\n zCell1 = int(pos1[2]/Nc)\r\n zCell2 = int(pos2[2]/Nc)\r\n xShiftBool = (xCell1 != xCell2)\r\n yShiftBool = (yCell1 != yCell2)\r\n zShiftBool = (zCell1 != zCell2)\r\n return (xShiftBool, yShiftBool, zShiftBool)\r\n \r\n \r\n #Now repeat process for inter-cellular bonds\r\n for bond in interCellBonds.list:\n pos1 = bond.pos1\r\n pos2 = bond.pos2\n print \"processing intercell bond: \", pos1, \" , \", pos2\r\n pos1Index = -1\r\n pos2Index = -1\r\n for i in range(len(allAtoms)):\r\n currentPos = allAtoms[i].pos\r\n #if currentPos == pos1:\r\n if currentPos[0] == pos1[0] and currentPos[1] == pos1[1] and currentPos[2] == pos1[2]:\r\n pos1Index = i\r\n break\r\n \r\n for i in range(len(allAtoms)):\r\n currentPos = allAtoms[i].pos \r\n #if currentPos == pos2:\r\n if currentPos[0] == pos2[0] and currentPos[1] == pos2[1] and currentPos[2] == pos2[2]:\r\n pos2Index = i\r\n break\r\n \r\n if pos1Index < 0 or pos2Index < 0:\r\n print \"Atom list does not contain all atoms!\"\r\n if pos1Index < 0:\r\n print pos1, \" missing\"\r\n if pos2Index < 0:\r\n print pos2, \" missing\"\r\n raise Exception(\"Export Failed\")\r\n else:\r\n direction = bondDirection(pos1, pos2)\r\n allAtoms[pos1Index].addInterCellInteraction(pos2Index, bond.jMatrix, direction)\r\n allAtoms[pos2Index].addInterCellInteraction(pos1Index, bond.jMatrix, direction)\n print \"adding interaction between atom \", pos1Index, \" and atom \", pos2Index\r\n \r\n \r\n \r\n# timer.printTime()\r\n print\"translating...\"\r\n \r\n def validBond(index1, index2, direction):\n \"\"\"1/20/11 I beleive the point of this function was to find the interactions that go off\n the edge of the translated lattice and therefore are invalid. When the larger lattice is\n made, it is made by translating the cutoff cell. So let's say there is an interaction\n from cell one, to it's neighbo rin the x direction in cell2. Then we translate in the x\n direction and get cell three, but then we have gone as far as we need to in the x direction\n (made by a for loop) and cell 4 is put next to cell one again, with y coordinate incremented.\n The interaction between cell 3 and cell 4 is invalid becuase they are not next to each other\n in the x direction. 
That is the problem, but this method is not working correctly.\n To work correctly it needs to check each direction that the bond crosses a cell boundary in \n (the direction argument) to see if it would be crossing the edge of the whole translated lattice.\n \n Right now it checks if the bond is in a different translated row of cells, but the problem\n is that legitimate interactions can do this.\"\"\"\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True\r\n \n \r\n #translate bonds contained within the CutoffCell\r\n for i in range(len(allAtoms)- numAtomsPerCell):\r\n newIndex = i+numAtomsPerCell\r\n for interaction in allAtoms[i].interactions:\r\n newInteraction = interaction[0] + numAtomsPerCell\r\n if newInteraction < len(allAtoms):#Should always be the case now\r\n allAtoms[newIndex].addInteraction(newInteraction, interaction[1])\r\n else:#for testing\r\n print \"\\n\\ncellbonds contains inter-cutoff cell bond!\\n\\n\"\r\n raise Exception(\"cellbonds contains inter-cutoff cell bond!\")\r\n \r\n \r\n \r\n \r\n #translate bonds between Cutoff cells\r\n #size^3 * numAtomsPerCell = len(allAtoms)\r\n \r\n #This method iterates through the whole list of atoms. Each time it encounters\r\n #an interaction it translates it to all later corresponding indices. This was\r\n #a necessary change from the method above, because with the method above, a bond\r\n #would stop propagating as soon as it encountered one invalid location (an edge).\r\n #This new method, however, will re-copy interactions that were copied early on \r\n #in the main loop, but are encountered later again in the main loop. This could\r\n #become slow with large lists. 
An alternate method would be to create a copy of\r\n #the list and copy only from the original to the copy, which eliminates the need\r\n #for checking repeats and ensures that each interaction is only propagated once.\r\n cubeSize = size*size*size\r\n for cell in range(cubeSize):\r\n for i in range(numAtomsPerCell):\r\n atomIndex = cell*numAtomsPerCell + i\r\n for interaction in allAtoms[atomIndex].interCellInteractions:\r\n for n in range(1, cubeSize - cell):\r\n displacement = numAtomsPerCell*n\n if interaction[0] + displacement < len(allAtoms):\r\n if validBond(atomIndex + displacement, interaction[0] + displacement, interaction[2]):\r\n #Checks for duplicates\r\n allAtoms[atomIndex + displacement].addInterCellInteraction(interaction[0] + displacement, interaction[1], interaction[2])\r\n \r\n \r\n# newInteraction = interaction[0] + numAtomsPerCell\r\n# if newInteraction < len(allAtoms):\r\n# allAtoms[i+numAtomsPerCell].addInterCellInteraction(newInteraction, interaction[1])\r\n \r\n \r\n \r\n# print \"done translating, checking list\"\r\n# timer.printTime()\r\n \r\n #Check for reapeats in finalBond List just for testing\r\n# def isRepeat(finalBondList):\r\n# for i in range(0, len(finalBondList)):\r\n# for j in range(i + 1, len(finalBondList)):\r\n# if finalBondList[i].sameBond(finalBondList[j]):\r\n# return True\r\n# return False\r\n# \r\n# if isRepeat(finalBondList):\r\n# print \"There is a repeat!\"\r\n# else:\r\n# print \"NO repeats!\"\r\n# \r\n# timer.printTime()\r\n \r\n \r\n #Check the simple atom list\r\n def atomBalanced(atomIndex):\r\n atom = allAtoms[atomIndex]\r\n for otherAtomIndex in range(len(allAtoms)):\r\n otherAtom = allAtoms[otherAtomIndex]\r\n if atomInteractsWithAtom(atomIndex, otherAtom):\r\n if atomInteractsWithAtom(otherAtomIndex, atom):\r\n return True\r\n else:\r\n return False\r\n return False\r\n \r\n def atomInteractsWithAtom(atomIndex, otherAtom):\r\n for interaction in otherAtom.interactions:\r\n if atomIndex == interaction[0]:\r\n return True\r\n return False\r\n \r\n \r\n# for atomIndex in range(len(allAtoms)):\r\n# if not atomBalanced(atomIndex):\r\n# print \"Not Balanced!!!\"\r\n# break\r\n# else:\r\n# print \"Balanced!\"\r\n\r\n return matrices, allAtoms",
"def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str",
"def make_derived_table(filename):\r\n column_keys, get_data = get_csv(filename)\r\n\r\n year_column = column_keys[1:].index('Year')\r\n pcg_column = column_keys[1:].index('PrimaryConditionGroup')\r\n \r\n #pcg_keys = list(PCG_LUT.keys())\r\n \r\n t0 = time.clock()\r\n \r\n NUM_GROUPS = 100\r\n num_rows = 0\r\n for group in range(NUM_GROUPS):\r\n derived_dict = {'ALL':{}, 'Y1':{}, 'Y2':{}, 'Y3':{}}\r\n print 'group=%d' % group\r\n _, get_data = get_csv(filename)\r\n for k,v in get_data():\r\n if (int(k) % NUM_GROUPS) != group:\r\n continue\r\n year = v[year_column]\r\n pcg = get_pcg_index(v[pcg_column])\r\n #if not v[pcg_column] in pcg_keys:\r\n # pcg_keys.append(v[pcg_column])\r\n #print '>', v[pcg_column]\r\n #print '\"%s\" => %d' % (v[pcg_column], pcg)\r\n \r\n if num_rows and num_rows % 10000 == 0:\r\n t = time.clock() - t0\r\n eta = int(t * (2668990 - num_rows)/num_rows)\r\n print ' %8d row (%4.1f%%) %7.1f sec, %4d rows/sec, eta = %6d sec' % (num_rows, \r\n 100.0 * num_rows/2668990, t, int(num_rows/t), eta) \r\n\r\n for y in (year, 'ALL'):\r\n if not k in derived_dict[y].keys():\r\n derived_dict[y][k] = [0, {}] \r\n derived_dict[y][k][0] += 1\r\n derived_dict[y][k][1][pcg] = derived_dict[y][k][1].get(pcg, 0) + 1 \r\n \r\n num_rows += 1\r\n \r\n print 'Coallescing' \r\n for year in derived_dict:\r\n for k in derived_dict[year].keys():\r\n if int(k) % NUM_GROUPS != group:\r\n continue\r\n derived_dict[year][k][1] = get_max_key(derived_dict[year][k][1]) \r\n pickled_path = make_group_name(group) \r\n pkl_file = open(pickled_path , 'wb')\r\n pickle.dump(derived_dict, pkl_file, -1) # Pickle the data using the highest protocol available.\r\n pkl_file.close() \r\n\r\n derived_dict = {'ALL':{}, 'Y1':{}, 'Y2':{}, 'Y3':{}} \r\n for group in range(NUM_GROUPS):\r\n pickled_path = make_group_name(group) \r\n pkl_file = open(pickled_path , 'rb')\r\n part_dict = pickle.load(pkl_file) \r\n pkl_file.close()\r\n for y,d in part_dict.items():\r\n for k,v in d.items():\r\n derived_dict[y][k] = (part_dict[y][k][0], part_dict[y][k][1]) \r\n\r\n if False:\r\n print '-' *80\r\n for k in pcg_keys:\r\n print \" '%s',\" % k \r\n exit() \r\n \r\n for year in derived_dict:\r\n derived_filename = '%s%s_%s' % (DERIVED_PREFIX, year, filename)\r\n data_writer = csv.writer(open(derived_filename , 'wb'), delimiter=',', quotechar='\"')\r\n data_writer.writerow(DERIVED_COLUMN_KEYS)\r\n for k in sorted(derived_dict[year].keys()):\r\n v = derived_dict[year][k]\r\n #print ' ', derived_dict[year][k], v2\r\n data_writer.writerow([k, str(v[0]), str(v[1])])",
"def create_sid_table_from_file(filepath):\n df = pd.read_csv(filepath, index_col=\"Symbol\")\n df = df.drop_duplicates()\n\n coded_sectors_for_ticker = df[\"Sector\"].map(SECTOR_CODING)\n\n ae_d = get_ticker_sid_dict_from_bundle('quantopian-quandl')\n N = max(ae_d.values()) + 1\n\n # create empty 1-D array to hold data where index = SID\n sectors = np.full(N, -1, np.dtype('int64'))\n\n # iterate over Assets in the bundle, and fill in sectors\n for ticker, sid in ae_d.items():\n sectors[sid] = coded_sectors_for_ticker.get(ticker, -1)\n\n np.save(os.path.join(BASE_PATH , SID_FILE), sectors)",
"def make_atomic_decay_table(nuc_data, build_dir=\"\"):\n xrd = parse_atomic_data(build_dir)\n\n db = tb.open_file(nuc_data, \"a\", filters=BASIC_FILTERS)\n\n # Make a new the table\n if not hasattr(db.root, \"decay\"):\n db.create_group(\"/\", \"decay\", \"ENSDF Decay data\")\n\n atomic_table = db.create_table(\n \"/decay/\",\n \"atomic\",\n xrd,\n \"z\"\n \"k_shell_fluor\"\n \"k_shell_fluor_error\"\n \"l_shell_fluor\"\n \"l_shell_fluor_error\"\n \"prob\"\n \"k_shell_be\"\n \"k_shell_be_err\"\n \"li_shell_be\"\n \"li_shell_be_err\"\n \"mi_shell_be\"\n \"mi_shell_be_err\"\n \"ni_shell_be\"\n \"ni_shell_be_err\"\n \"kb_to_ka\"\n \"kb_to_ka_err\"\n \"ka2_to_ka1\"\n \"ka2_to_ka1_err\"\n \"k_auger\"\n \"k_auger\"\n \"ka1_x_ray_en\"\n \"ka1_x_ray_en_err\"\n \"ka2_x_ray_en\"\n \"ka2_x_ray_en_err\"\n \"kb_x_ray_en\"\n \"l_x_ray_en\",\n expectedrows=103,\n )\n atomic_table.flush()\n db.close()",
"def __init__(self, filename):\r\n self.__output__ = open(format(filename, '08X') + '.gen', 'wb')"
] |
[
"0.5849805",
"0.5413301",
"0.53558016",
"0.5234502",
"0.5228045",
"0.51632726",
"0.5125793",
"0.5085775",
"0.50833756",
"0.5082366",
"0.5081895",
"0.5077303",
"0.50645727",
"0.5060005",
"0.50593895",
"0.5053085",
"0.5021432",
"0.50182503",
"0.50168055",
"0.49868885",
"0.49842334",
"0.49799553",
"0.49794215",
"0.49601212",
"0.4952479",
"0.49436864",
"0.49434027",
"0.49359223",
"0.49346957",
"0.49338186"
] |
0.66138935
|
0
|
r"""Generates the content of the text FST file. Generated file is in AT&T format. It defines the state transition arcs and input/output label pairs of the morphotactics model.
|
def _text_fst_file_content(
    rule_set: _RewriteRuleSet) -> Generator[str, None, None]:

  class _Local:
    state_count = 0

  def _new_state_index() -> int:
    _Local.state_count += 1
    return _Local.state_count

  def arc(from_: str,
          to: str,
          input_: str = common.EPSILON,
          output: str = common.EPSILON) -> str:
    return f"{from_}\t{to}\t{input_}\t{output}\n"

  start_state = common.START_STATE
  epsilon = common.EPSILON
  index_of = collections.defaultdict(_new_state_index)
  index_of[start_state] = 0

  for rule in rule_set.rule:
    input_symbols = _symbols_of_input(rule.input)
    output_symbols = _symbols_of_output(rule.output)

    # Pad list of input and output symbols with epsilon transitions until they
    # have the same length.
    while len(input_symbols) < len(output_symbols):
      input_symbols.append(epsilon)
    while len(output_symbols) < len(input_symbols):
      output_symbols.append(epsilon)

    from_ = index_of[rule.from_state]
    for input_, output in zip(input_symbols, output_symbols):
      to = _new_state_index()
      yield arc(from_, to, input_, output)
      from_ = to
    yield arc(from_, index_of[rule.to_state])

  # Last line should be the index of the accept state.
  yield f"{index_of[common.ACCEPT_STATE]}\n"

  logging.info("generated text FST file content")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_make_flow_txt(self):\r\n flow_fp = os.path.join(self.sff_dir, 'test.txt')\r\n flow_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.txt')\r\n make_flow_txt(self.sff_fp, flow_fp)\r\n make_flow_txt(self.sff_gz_fp, flow_gz_fp)\r\n self.assertEqual(open(flow_fp).read(), flow_txt)\r\n self.assertEqual(open(flow_gz_fp).read(), flow_txt)",
"def generate(self, analysis):\n\n #analysis = ['p','a','n','i','c','+past form']\n # Let's define our first FST\n\n f1 = FST('morphology-generate')\n \n f1.add_state('1')\n f1.add_state('2')\n f1.add_state('3')\n f1.add_state('4')\n f1.add_state('5') \n f1.add_state('6') #non-c state\n f1.add_state('7') #c state\n f1.add_state('8') #add k\n f1.add_state('9') #+present \n f1.add_state('10') #+past\n \n f1.initial_state = '1'\n #f1.set_final('8')\n f1.set_final('9')\n f1.set_final('10')\n \n #state 1 to 2, and 2 to 3. we don't care about vowel or consonant here\n for letter in list(string.ascii_letters):\n f1.add_arc('1', '2', letter, letter)\n f1.add_arc('2', '3', letter, letter)\n \n #3 to 5 input/output consonants\n vowels = ['a','e','i','o','u','A','E','I','O','U']\n consonants = [c for c in list(string.ascii_letters) if c not in vowels]\n non_c_con = [c for c in consonants if c not in ['c', 'C']]\n for letter in consonants:\n f1.add_arc('3', '5', letter, letter)\n f1.add_arc('5', '5', letter, letter)\n \n #the third and fourth input should be a vowel\n for letter in vowels:\n f1.add_arc('3', '4', letter, letter)\n f1.add_arc('4', '4', letter, letter)\n \n #if the fourth input is a non c consonant, go to 5\n for letter in non_c_con:\n f1.add_arc('4', '5', letter, letter)\n \n #if the input at state 5 is a vowel, go back to 4 \n for letter in vowels:\n f1.add_arc('5', '4', letter, letter)\n \n #if the second last letter is a c, go to 7\n f1.add_arc('4', '7', 'c', 'c')\n \n #add k after 7\n f1.add_arc('7', '8', '', 'k')\n #output nothing from 5 to 8\n f1.add_arc('5', '8', '', '')\n \n f1.add_arc('8','9','+present participle form','ing')\n f1.add_arc('8','10','+past form','ed')\n \n output = f1.transduce(analysis)[0]\n return ''.join(output)",
"def produce_output_txt(self):\n\n NAME = \"TODO get name form cpacs object\"\n\n result_dir = get_results_directory(\"WeightConventional\")\n\n output_file = Path(result_dir, \"Aircraft_Geometry.out\")\n\n OutputTextFile = open(output_file, \"w\")\n\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n###### AIRCRAFT GEOMETRY EVALUATION MODULE ######\")\n OutputTextFile.write(\"\\n###### OUTPUTS ######\")\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nAircraft: \" + NAME)\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nGeometry Evaluations-----------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nUSEFUL INFO -------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\n \"\\nIf fuselage or wing number is greater than 1 the\\n\"\n \"information of each obj are listed in an \"\n \"array ordered\\nprogressively\"\n )\n OutputTextFile.write(\n \"\\nSymmetry output: 0 = no symmetry, 1 = x-y,\\n\" + \"2 = x-z, 3 = y-z planes\"\n )\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nRESULTS -----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nFUSELAGE ----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of fuselage sections [-]: {self.fuse_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of fuselage segments [-]: {self.fuse_seg_nb}\")\n OutputTextFile.write(f\"\\nCabin segments array [-]: {self.cabin_seg}\")\n OutputTextFile.write(f\"\\nFuse Length [m]: {np.around(self.fuse_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse nose Length [m]: {np.around(self.fuse_nose_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse cabin Length [m]: {np.around(self.fuse_cabin_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse tail Length [m]: {np.around(self.fuse_tail_length, 5)}\")\n OutputTextFile.write(f\"\\nAircraft Length [m]: {np.around(self.tot_length, 5)}\")\n OutputTextFile.write(\n \"\\nCircumference of each section of the fuselage [m]:\"\n f\"\\n{np.around(self.fuse_sec_circ, 5)}\"\n )\n OutputTextFile.write(\n \"\\nRelative distance of each section of the\"\n + \"fuselage, respect to the first one [m]: \\n\"\n + str(np.around(self.fuse_sec_rel_dist, 5))\n )\n OutputTextFile.write(\n \"\\nLength of each segment of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nMean fuselage width [m]: \" + str(np.around(self.fuse_mean_width, 5))\n )\n OutputTextFile.write(\n \"\\nWidth of each section of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_sec_width, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of each segment of the fuselage \"\n \"[m^3]: \\n\" + str(np.around(self.fuse_seg_vol, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of the cabin [m^3]: \" + str(np.around(self.fuse_cabin_vol, 5))\n )\n OutputTextFile.write(\"\\nVolume of the fuselage [m^3]: \" + str(np.around(self.fuse_vol, 5)))\n 
OutputTextFile.write(\"\\n\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nWINGS -------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of Wings [-]: {self.wing_nb}\")\n OutputTextFile.write(f\"\\nWing symmetry plane [-]: {self.wing_sym}\")\n OutputTextFile.write(f\"\\nNumber of wing sections [-]: {self.wing_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of wing segments [-]: {self.wing_seg_nb}\")\n OutputTextFile.write(f\"\\nWing Span [m]: \\n{np.around(self.wing_span, 5)}\")\n OutputTextFile.write(\n \"\\nWing MAC length [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 0,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWing MAC x,y,z coordinate [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 1:4,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWings sections thickness [m]: \\n\" + str(np.around(self.wing_sec_thickness, 5))\n )\n OutputTextFile.write(\n \"\\nWings sections mean thickness [m]: \\n\" + str(np.around(self.wing_sec_mean_thick, 5))\n )\n OutputTextFile.write(\n \"\\nWing segments length [m]: \\n\" + str(np.around(self.wing_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nWing max chord length [m]: \\n\" + str(np.around(self.wing_max_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWing min chord length [m]: \\n\" + str(np.around(self.wing_min_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWings planform area [m^2]: \\n\" + str(np.around(self.wing_plt_area, 5))\n )\n OutputTextFile.write(\n \"\\nMain wing planform area [m^2]: \" + str(np.around(self.wing_plt_area_main, 5))\n )\n OutputTextFile.write(\"\\nVolume of each wing [m^3]: \\n\" + str(np.around(self.wing_vol, 5)))\n OutputTextFile.write(\"\\nTotal wing volume [m^3]: \" + str(np.around(self.wing_tot_vol, 5)))\n OutputTextFile.write(\"\\nWing volume for fuel storage [m^3]: \" + str(self.wing_fuel_vol))\n\n # Close Text File\n OutputTextFile.close()",
"def make_flow_txt(sff_fp, output_fp, use_sfftools=False):\r\n if use_sfftools:\r\n _fail_on_gzipped_sff(sff_fp)\r\n check_sffinfo()\r\n _check_call(['sffinfo', sff_fp], stdout=open(output_fp, 'w'))\r\n else:\r\n try:\r\n format_binary_sff(qiime_open(sff_fp, 'rb'), open(output_fp, 'w'))\r\n except:\r\n raise IOError(\"Could not parse SFF %s\" % sff_fp)",
"def readFST(self):\n\n fname = self.fst_file\n print \"reading FAST template file\", fname\n try:\n fh = open(fname,'r')\n self.lines_fast = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening master FAST input file %s\\n\" % fname)\n return 0\n\n for line in self.lines_fast:\n f = line.lstrip().split()\n if (len(f) < 2):\n continue\n\n if (f[1] == 'PtfmFile' and self.ptfm_file == None):\n self.ptfm_file = f[0][1:-1]\n if (f[1] == 'TwrFile' and self.twr_file == None):\n self.twr_file = f[0][1:-1]\n if (f[1] == 'ADAMSFile' and self.adams_file == None):\n self.adams_file = f[0][1:-1]\n if (f[1] == 'BldFile(1)' and self.blade1_file == None):\n self.blade1_file = f[0][1:-1]\n if (f[1] == 'BldFile(2)' and self.blade2_file == None):\n self.blade2_file = f[0][1:-1]\n if (f[1] == 'BldFile(3)' and self.blade3_file == None):\n self.blade3_file = f[0][1:-1]\n if (f[1] == 'ADFile' and self.ad_file == None):\n self.ad_file = f[0][1:-1]\n if (f[1] == 'NoiseFile' and self.noise_file == None):\n self.noise_file = f[0][1:-1]\n \n print \"FAST subfiles:\"\n print \"ptfm \", self.ptfm_file\n print \"twr \", self.twr_file\n print \"blades \", self.blade1_file, self.blade2_file, self.blade3_file\n print \"ad \", self.ad_file\n print \"noise \", self.noise_file",
"def make_fet(self):\n fet_filename = self.filename + \".fet.\" + str(self.tet_num)\n with open(fet_filename, \"w\") as f:\n f.write(str(self.feature_array.shape[1]))\n f.write(\"\\n\")\n np.savetxt(f, self.feature_array, fmt=\"%1.5f\")",
"def store_story(self):\n\n file_path = self.dir + '/' + self.filename + '.txt'\n file_path = self.check_path(file_path)\n with open(file_path, 'a') as f_out:\n f_out.write('Name: ' + self.name + '\\n')\n f_out.write('Age: ' + self.age + '\\n')\n f_out.write('Country of origin: ' + self.origin + '\\n')\n f_out.write('Company: ' + self.company + '\\n')\n f_out.write('First country of arrival: ' + self.route)\n if self.route.lower() != 'the netherlands':\n f_out.write(' >>> DUBLIN PROCEDURE\\n')\n else:\n f_out.write('\\n')\n f_out.write('Entrance: ' + self.entrance + '\\n')\n f_out.write('Documentation: ' + self.documentation + '\\n')\n f_out.write('Exclusion: ' + self.exclusion + '\\n')\n f_out.write('Conflict: ' + self.conflict + '\\n')\n f_out.write('Inhumanity: ' + self.inhumanity + '\\n')\n f_out.write('Family with a permit: ' + self.family + '\\n')\n return",
"def train_and_generate(text_path):\n\n print(\"\\n------------------ ff.io Parameters ------------------\")\n print(f\"Generate text length: {text_length}\")\n print(f\"Sequence length: {seq_length}\\n\")\n print(f\"{layers_count} layers with dimension {layers_dim}\")\n print(f\"{epoch_num} epochs with batch size {batch_s}\\n\")\n\n text = read_text(text_path)\n\n if load_model:\n print(\"Loading model from file.\")\n\n if model_type == 'word':\n print(\"Creating word maps.\")\n characters, n_to_char, char_to_n = word_map(text)\n \n else: # Default to character maps\n print(\"Creating character maps.\")\n characters, n_to_char, char_to_n = character_map(text)\n\n if seed_text:\n seed_text_str = read_text(seed_text_filepath)\n\n print(\"Processing text.\")\n X, Y, characters, n_to_char = process_text(text, characters, n_to_char, char_to_n)\n\n print(\"Modelling\\n\")\n mod = model(X, Y, characters)\n\n gen_text = generate_text(mod, text_length, text, X, characters, n_to_char, char_to_n, seed_text_str = seed_text_str)\n\n return gen_text",
"def createTOFin(En):\n ftemplate = open(\"TOFtemplate.in\", \"r\")\n lines = ftemplate.readlines()\n ftofin = open(\"TOF.in\", \"w\") \n energyline = lines[12].split()\n lines[12] = \"%s %g %s\\n\"%(energyline[0], En, energyline[2])\n ftofin.writelines(lines)\n ftemplate.close()\n ftofin.close()",
"def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)",
"def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk",
"def create_opl_mod_output_text(tdf):\n return _create_opl_mod_text(tdf, True)",
"def write_file(self, f=None):\n # get model information\n nlay = self.parent.nlay\n dis = self.parent.get_package(\"DIS\")\n if dis is None:\n dis = self.parent.get_package(\"DISU\")\n\n # Open file for writing\n if f is None:\n f_obj = open(self.fn_path, \"w\")\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET, IKVFLAG, IKCFLAG\n f_obj.write(\n f\" {self.ipakcb:9d} {self.hdry:9.3G} {self.iwdflg:9d}\"\n f\" {self.wetfct:9.3G} {self.iwetit:9d} {self.ihdwet:9d}\"\n f\" {self.ikvflag:9d} {self.ikcflag:9d}\\n\"\n )\n\n # LAYCON array\n for layer in range(nlay):\n if self.intercellt[layer] > 0:\n f_obj.write(\n f\"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} \"\n )\n else:\n f_obj.write(f\"0{self.laycon[layer]:1d} \")\n f_obj.write(\"\\n\")\n\n # TRPY, <ANGLEX>\n f_obj.write(self.trpy.get_file_entry())\n transient = not dis.steady.all()\n structured = self.parent.structured\n anis = any(t != 1 for t in self.trpy)\n if (not structured) and anis:\n f_obj.write(self.anglex.get_file_entry())\n\n # <SF1>, <TRAN>, <HY>, <VCONT>, <KV>, <SF2>, <WETDRY>\n for layer in range(nlay):\n if transient:\n f_obj.write(self.sf1[layer].get_file_entry())\n\n if self.ikcflag == 0:\n self._write_hy_tran_vcont_kv(f_obj, layer)\n\n if transient and (self.laycon[layer] in [2, 3, 4]):\n f_obj.write(self.sf2[layer].get_file_entry())\n\n if (self.iwdflg != 0) and (self.laycon[layer] in [1, 3]):\n f_obj.write(self.wetdry[layer].get_file_entry())\n\n # <KSAT> (if ikcflag==1)\n if abs(self.ikcflag == 1):\n f_obj.write(self.ksat.get_file_entry())\n\n f_obj.close()",
"def createTXT(self):\n now = dt.datetime.now().strftime(\"%m-%d %H-%M\")\n self.filename = \"bwcca_tags \" + now\n try:\n if \"/\" in self.dir_lbl[\"text\"]:\n desired_list = self.phraseMaker()\n with open(f\"{self.folder}/{self.filename}.txt\", \"w\") as f:\n for i in desired_list:\n f.write(f\"{i}\\n\")\n self.stat_lbl[\"text\"] = f\"/{self.filename} created!\"\n else:\n self.dir_lbl[\"text\"] = \"Select a folder!\"\n self.dir_btn.focus()\n except Exception as e:\n self.dir_lbl[\"text\"] = e",
"def eficas_translation(ts_file, new_ts_file, lang):\n dicoCataToLabel={}\n dicoCataToTelemac={}\n header = '<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n header +='<!DOCTYPE TS><TS version=\"1.1\" language=\"'+lang+'\">'\n header +='<context>\\n'\n header +=' <name>@deafult</name>\\n'\n\n end ='</context>\\n</TS>\\n'\n\n pattern_In=re.compile(r'^\\s*<source>(?P<ident>.*)</source>\\s*$')\n pattern_Out=re.compile(r'^\\s*<translation>(?P<traduit>.*)</translation>\\s*$')\n pattern_In2=re.compile(r'^\\s*<source2>(?P<ident>.*)</source2>\\s*$')\n pattern_Out2=re.compile(r'^\\s*<translation2>(?P<traduit>.*)</translation2>\\s*$')\n listeMaj=[]\n listeMaj.append(('for h','for H'))\n listeMaj.append(('pour h','pour H'))\n listeMaj.append(('for u','for U'))\n listeMaj.append(('pour u','pour U'))\n listeMaj.append(('of k','of K'))\n listeMaj.append(('de k','de K'))\n listeMaj.append(('of h','of H'))\n listeMaj.append(('de h','de H'))\n listeMaj.append(('u and v','U and V'))\n listeMaj.append(('u et v','U et V'))\n listeMaj.append(('on h','on H'))\n listeMaj.append(('sur h','sur H'))\n listeMaj.append(('supg','SUPG'))\n listeMaj.append(('k and epsilon','K and Epsilon'))\n listeMaj.append(('k-epsilon','K-Epsilon'))\n listeMaj.append(('gmres','GMRES'))\n listeMaj.append(('cgstab','CGSTAB'))\n listeMaj.append(('q(z)','Q(Z)'))\n listeMaj.append(('z(q)','Z(Q)'))\n listeMaj.append(('wgs84','WGS84'))\n listeMaj.append(('wgs84','UTM'))\n listeMaj.append(('n-scheme','N-Scheme'))\n listeMaj.append(('scheme n','Scheme N'))\n listeMaj.append(('psi-scheme','PSI-Scheme'))\n listeMaj.append((' psi',' PSI'))\n listeMaj.append(('f(t90)','F(T90)'))\n listeMaj.append(('(pa)','(Pa)'))\n listeMaj.append(('h clipping','H clipping'))\n listeMaj.append(('delwaq','DELWAQ'))\n listeMaj.append(('tomawac','TOMAWAC'))\n listeMaj.append(('chezy','CHEZY'))\n listeMaj.append(('hllc','HLLC'))\n listeMaj.append(('c-u','C-U'))\n listeMaj.append(('c,u,v','C,U,V'))\n listeMaj.append(('h,u,v','H,U,V'))\n listeMaj.append(('previmer','PREVIMER'))\n listeMaj.append(('fes20xx','FES20XX'))\n listeMaj.append(('legos-nea','LEGOS-NEA'))\n listeMaj.append(('tpxo','TPXO'))\n listeMaj.append((' x',' X'))\n listeMaj.append((' y',' Y'))\n listeMaj.append(('waf','WAF'))\n listeMaj.append(('(w/kg)','(W/kg)'))\n listeMaj.append(('(j/kg)','(W/kg)'))\n listeMaj.append(('zokagoa','Zokagoa'))\n listeMaj.append(('nikuradse','Nikuradse'))\n listeMaj.append(('froude','Froude'))\n listeMaj.append(('gauss','Gauss'))\n listeMaj.append(('seidel','Seidel'))\n listeMaj.append(('leo','Leo'))\n listeMaj.append(('postma','Postma'))\n listeMaj.append(('crout','Crout'))\n listeMaj.append(('okada','Okada'))\n listeMaj.append(('jmj','JMJ'))\n listeMaj.append(('haaland','HAALAND'))\n listeMaj.append(('grad(u)','grad(U)'))\n listeMaj.append(('variable z','variable Z'))\n listeMaj.append(('variable r','variable R'))\n listeMaj.append(('ascii','ASCII'))\n\n with open(ts_file, 'r') as f:\n for ligne in f.readlines():\n if pattern_In.match(ligne):\n m = pattern_In.match(ligne)\n ident = m.group('ident')\n if pattern_Out.match(ligne):\n m = pattern_Out.match(ligne)\n traduit = m.group('traduit')\n dicoCataToTelemac[ident] = traduit\n traduitMin = traduit.lower()\n for t in listeMaj :\n traduit = traduitMin.replace(t[0], t[1])\n traduitMin = traduit\n chaine = traduitMin[0].upper() + traduitMin[1:]\n dicoCataToLabel[ident] = chaine\n if pattern_In2.match(ligne):\n m = pattern_In2.match(ligne)\n ident = m.group('ident')\n if pattern_Out2.match(ligne):\n m = 
pattern_Out2.match(ligne)\n traduit = m.group('traduit')\n dicoCataToTelemac[ident] = traduit\n dicoCataToLabel[ident] = traduit\n\n with open(new_ts_file, 'w') as f:\n f.write(header)\n for k in dicoCataToTelemac :\n text = \" <message>\\n <source>\"\n text += k\n text += \"</source>\\n <translation>\"\n text += dicoCataToLabel[k]\n text += \"</translation>\\n </message>\\n\"\n f.write(text)\n f.write(end)",
"def write_dftb_in(self, filename):\n\n outfile = open(filename, 'w')\n outfile.write('Geometry = GenFormat { \\n')\n #outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write(' <<< %s \\n' %self.geo_fname)\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n else:\n # User didn't specify max angular mometa. Get them from\n # the .skf files:\n symbols = set(self.atoms.get_chemical_symbols())\n for symbol in symbols:\n path = os.path.join(self.slako_dir,\n '{0}-{0}.skf'.format(symbol))\n l = read_max_angular_momentum(path)\n params[s + symbol] = '\"{}\"'.format('spdf'[l])\n\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth > 0): # E.g. 'Hamiltonian_Max... = {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n elif ((key == 'Hamiltonian_ReadInitialCharges') and \n (str(value).upper() == 'YES')):\n f1 = os.path.isfile(self.directory + os.sep + 'charges.dat')\n f2 = os.path.isfile(self.directory + os.sep + 'charges.bin')\n if not (f1 or f2):\n print('charges.dat or .bin not found, switching off guess')\n value = 'No'\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n # point\n if self.pcpot is not None and ('DFTB' in str(value)):\n outfile.write(' ElectricField = { \\n')\n outfile.write(' PointCharges = { \\n')\n outfile.write(\n ' CoordsAndCharges [Angstrom] = DirectRead { \\n')\n outfile.write(' Records = ' +\n str(len(self.pcpot.mmcharges)) + ' \\n')\n outfile.write(\n ' File = \"dftb_external_charges.dat\" \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n previous_key = key\n\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n #outfile.write('ParserOptions { \\n')\n #outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n #outfile.write('} \\n')\n #if self.do_forces:\n # outfile.write('Analysis { \\n')\n # outfile.write(' CalculateForces = Yes \\n')\n # outfile.write('} \\n')\n\n outfile.close()",
"def parse(self, word):\n # Ok so now let's do the second FST\n f2 = FST('morphology-parse')\n f2.add_state('start')\n f2.initial_state = 'start'\n \n #add states for the word lick\n for w in list('lick'):\n state_name = 'lick-' + w\n f2.add_state(state_name)\n #add first letter \n f2.add_arc('start', 'lick-l', 'l', 'l')\n \n #add arc for the word lick\n lick = list('lick')\n for w in range(0,len(lick)-1):\n f2.add_arc('lick-'+lick[w], 'lick-'+lick[w+1], lick[w+1], lick[w+1] )\n \n #add states for the word lick \n for w in list('want'):\n state_name = 'want-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'want-w', 'w', 'w')\n #add arc for the word want\n want = list('want')\n for w in range(0,len(want)-1):\n f2.add_arc('want-'+want[w], 'want-'+want[w+1], want[w+1], want[w+1] )\n\n #add states for the word sync\n sync = list('sync')\n for w in sync:\n state_name = 'sync-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'sync-s', 's', 's')\n #add arc for the word sync\n for w in range(0,len(sync)-1):\n f2.add_arc('sync-'+sync[w], 'sync-'+sync[w+1], sync[w+1], sync[w+1] )\n \n #add states for the word panic\n panic = list('panic')\n for w in panic:\n state_name = 'panic-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'panic-p', 'p', 'p')\n #add arc for the word panic\n for w in range(0,len(panic)-1):\n f2.add_arc('panic-'+panic[w], 'panic-'+panic[w+1], panic[w+1], panic[w+1] )\n \n #add states for the word havoc\n havoc = list('havoc')\n for w in havoc:\n state_name = 'havoc-' + w\n f2.add_state(state_name)\n \n f2.add_arc('start', 'havoc-h', 'h', 'h')\n #add arc for the word havoc\n for w in range(0,len(havoc)-1):\n f2.add_arc('havoc-'+havoc[w], 'havoc-'+havoc[w+1], havoc[w+1], havoc[w+1] )\n \n f2.add_state('intermediate1')\n f2.add_state('intermediate2')\n f2.add_state('pres1')\n f2.add_state('past1')\n \n f2.add_arc('lick-k', 'intermediate1', '', '')\n f2.add_arc('want-t', 'intermediate1', '', '')\n f2.add_arc('sync-c', 'intermediate1', '', '')\n f2.add_arc('panic-c', 'intermediate1', 'k', '')\n f2.add_arc('havoc-c', 'intermediate1', 'k', '')\n \n f2.add_arc('intermediate1', 'pres1', 'ing', '+present participle form')\n f2.add_arc('intermediate1', 'past1', 'ed', '+past form')\n\n f2.set_final('pres1')\n f2.set_final('past1')\n \n if ''.join(word[-3:]) == 'ing':\n inputs = word[:-3]\n inputs.append('ing')\n elif ''.join(word[-2:]) == 'ed':\n inputs = word[:-2]\n inputs.append('ed')\n else:\n inputs = word\n \n output = f2.transduce(inputs)[0]\n return ''.join(output)",
"def create_eqt_template(nodes, input_filename):\n output_filename = f'{input_filename[:-4]}_eqpt_sheet.txt'\n with open(output_filename, 'w', encoding='utf-8') as my_file:\n # print header similar to excel\n my_file.write('OPTIONAL\\n\\n\\n\\\n \\t\\tNode a egress amp (from a to z)\\t\\t\\t\\t\\tNode a ingress amp (from z to a) \\\n \\nNode A \\tNode Z \\tamp type \\tatt_in \\tamp gain \\ttilt \\tatt_out\\\n amp type \\tatt_in \\tamp gain \\ttilt \\tatt_out\\n')\n\n for node in nodes.values():\n if node.eqpt == 'ILA':\n my_file.write(f'{node.uid}\\t{node.to_node[0]}\\n')\n if node.eqpt == 'ROADM':\n for to_node in node.to_node:\n my_file.write(f'{node.uid}\\t{to_node}\\n')\n\n print(f'File {output_filename} successfully created with Node A - Node Z entries for Eqpt sheet in excel file.')",
"def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")",
"def createInput(dirPath,gSettings):\n \n with open(os.path.join('../in','input.txt')) as f:\n inpFile = f.readlines()\n \n\n # Model settings\n model = gSettings[\"Model\"]\n inpFile[13] = \"insgrav: {:1d}\\n\".format(int(model[\"NS gravity\"][\"Flag\"]))\n inpFile[14] = \"isun: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Sun\"]))\n inpFile[15] = \"imoon: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Moon\"]))\n\n if model[\"Drag\"][\"Flag\"] == False:\n inpFile[16] = \"idrag: 0\\n\"\n else:\n dm = model[\"Drag\"][\"Model\"].lower()\n if dm == \"wertz\":\n idrag = 1\n elif dm == \"us76\":\n idrag = 2\n elif dm == \"j77\":\n idrag = 3\n elif dm == \"msis00\":\n idrag = 4\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Model\"] + '\" invalid.')\n inpFile[16] = \"idrag: {:1d}\\n\".format(idrag)\n if model[\"Drag\"][\"Solar flux\"].lower() == \"constant\":\n inpFile[17] = \"iF107: 0\\n\"\n elif model[\"Drag\"][\"Solar flux\"].lower() == \"variable\":\n inpFile[17] = \"iF107: 1\\n\"\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Solar flux\"] + '\" invalid.')\n\n if model[\"SRP\"][\"Flag\"] == False:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n else:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n if model[\"SRP\"][\"Eclipses\"]:\n inpFile[18] = \"iSRP: 2\\n\"\n \n if model[\"Lunisolar\"][\"Ephemerides\"] == \"DE431\":\n inpFile[19] = \"iephem: 1\\n\"\n elif model[\"Lunisolar\"][\"Ephemerides\"] == \"Meeus\":\n inpFile[19] = \"iephem: 2\\n\"\n else:\n raise ValueError('Value \"' + model[\"Lunisolar\"][\"Ephemerides\"] + '\" invalid.')\n \n inpFile[20] = \"gdeg: {:3d}\\n\".format(model[\"NS gravity\"][\"Degree\"])\n if model[\"NS gravity\"][\"Order\"] <= model[\"NS gravity\"][\"Degree\"]:\n inpFile[21] = \"gord: {:3d}\\n\".format(model[\"NS gravity\"][\"Order\"])\n else:\n raise ValueError(\"Order {0:d} of the gravity field is greater than degree {1:d}\".format(model[\"NS gravity\"][\"Order\"],model[\"NS gravity\"][\"Degree\"]))\n \n\n\n # Integration settings\n integ = gSettings[\"Integration\"]\n inpFile[29] = \"tol: {:22.15E}\\n\".format(integ[\"Tolerance\"])\n inpFile[30] = \"tspan: {:22.15E}\\n\".format(integ[\"Duration\"] * 365.25)\n inpFile[31] = \"tstep: {:22.15E}\\n\".format(integ[\"Step\"])\n inpFile[39] = \"eqs: {:2d}\\n\".format(integ[\"Equations\"])\n\n\n\n # Output settings\n inpFile[44] = \"verb: 0\\n\"\n inpFile[45] = \"out: \" + os.path.abspath(os.path.join(dirPath, ' '))\n\n\n with open(os.path.join(dirPath,'input.txt'),'w') as f:\n f.writelines(inpFile)",
"def write_inputs(self, extraFstDict={}):\n\n if (self.run_dir == self.fst_dir):\n raise ValueError, \"run_dir == fst_dir, you cannot run directly in the template directory\"\n\n self.run_name, ext = os.path.splitext(self.fst_file)\n\n if (not os.path.isdir(self.run_dir)):\n os.mkdir(self.run_dir)\n\n self.fst_dir = os.path.abspath(self.fst_dir)\n\n if (self.exec_count <= 1): # Is 0 when invoked by main()\n # Is 1 when invoked by Assembly ???\n self.read_inputs()\n\n for key in extraFstDict:\n self.fstDict[key] = extraFstDict[key]\n\n curdir = os.getcwd()\n os.chdir (self.run_dir) ###note, change to run_dir\n\n self.writeFST(self.fst_file,self.fstDict) \n self.writeAD()\n self.writeBlade()\n self.writeWnd()\n self.writeNoise()\n self.writePtfm(self.fstDict)\n self.copyTwr()\n self.copyAdams()\n\n os.chdir(curdir) ## restore dir",
"def _amber_write_input_file(self):\n logger.debug(\"Writing {}\".format(self.input))\n with open(os.path.join(self.path, self.input), \"w\") as f:\n f.write(\"{}\\n\".format(self.title))\n f.write(\" &cntrl\\n\")\n self._write_dict_to_mdin(f, self.cntrl)\n\n if self.ewald is not None:\n f.write(\" &ewald\\n\")\n self._write_dict_to_mdin(f, self.ewald)\n\n if self.cntrl[\"nmropt\"] == 1:\n if self.wt is not None:\n for line in self.wt:\n f.write(\" \"+line+\"\\n\")\n f.write(\" &wt type = 'END', /\\n\")\n if self.restraint_file is not None:\n f.write(\"DISANG = {}\\n\".format(self.restraint_file))\n f.write(\"LISTOUT = POUT\\n\\n\")\n if self.group is not None:\n f.write(\"{:s}\".format(self.group))",
"def write_coord_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n lis = []\n with open(filepath, 'r') as file:\n for line in file:\n if line[:4] == 'ATOM':\n line_split = line.split()\n lis.append(line_split[3:4])\n choice1 = input('Enter name for the output file: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as myfile:\n for i in lis:\n myfile.writelines(i)\n print('Done!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')",
"def htk2dag(self, file_path):\n field_re = re.compile(r'(\\S+)=(?:\"((?:[^\\\\\"]+|\\\\.)*)\"|(\\S+))')\n open_fn = gzip.open if file_path.endswith('.gz') else open\n with open_fn(file_path, 'rt', encoding='utf-8') as fh:\n self.header = {}\n self.nframes = 0\n state = 'header'\n # Read everything\n for spam in fh:\n if spam.startswith('#'):\n continue\n fields = dict(map(lambda t: (t[0], t[1] or t[2]),\n field_re.findall(spam.rstrip())))\n # Number of nodes and arcs\n if 'N' in fields:\n num_nodes = int(fields['N'])\n self.nodes = [None] * num_nodes\n num_arcs = int(fields['L'])\n self.arcs = [None] * num_arcs\n state = 'items'\n if state == 'header':\n self.header.update(fields)\n else:\n # This is a node\n if 'I' in fields:\n idx = int(fields['I'])\n frame = int(float(fields['t']) * FRATE)\n var = int(fields['v']) if 'v' in fields else None\n node = self.Node(\n fields['W'].replace('\\\\', ''), frame, var)\n self.nodes[idx] = node\n if frame > self.nframes:\n self.nframes = frame\n # This is an arc\n elif 'J' in fields:\n idx = int(fields['J'])\n start_node = self.nodes[int(fields['S'])]\n end_node = self.nodes[int(fields['E'])]\n ascr = float(fields.get('a', 0))\n lscr = float(fields.get('l', 0))\n nscr = fields.get('n', [])\n if isinstance(nscr, str):\n nscr = [float(n) for n in nscr.split(',')]\n iscr = fields.get('i', [])\n if isinstance(iscr, str):\n iscr = [float(i) for i in iscr.split(',')]\n arc = self.Arc(\n start_node, end_node, ascr, lscr, nscr, iscr)\n self.arcs[idx] = arc\n # Link up existing nodes\n start_node.exits.append(arc)\n end_node.entries.append(arc)\n\n self.sort_nodes()",
"def main():\r\n\timport sys\r\n\r\n\tlistofSequences = FastAreader(sys.stdin).readFasta() \r\n\tPAMSequences = PAMfinder(listofSequences).classController() # Calls on controller class to return desired models.\r\n\tf = open('Guide Sequences.txt','w') \r\n\tfor i in range(len(PAMSequences[0])):\r\n\t\tf.write(PAMSequences[0][i]) # Prints the header sequence into the file.\r\n\t\tf.write('\\n') \r\n\t\tprint(PAMSequences[0][i]) \r\n\t\tfor j in range(len(PAMSequences[1][i])): \r\n\t\t\tif j == 0: \r\n\t\t\t\tf.write(\"Forward Strand PAM Sites:\") \r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Forward Strand PAM Sites:\") \r\n\t\t\tprint(PAMSequences[1][i][j]) # Prints the forward sequences\r\n\t\t\ty = str(PAMSequences[1][i][j]) # Changes from int to string characters.\r\n\t\t\tx = ''.join(y) # Joining all the string values so we can print to file.\r\n\t\t\tf.write(x) # Write the joined forward sequences to the file.\r\n\t\t\tf.write('\\n')\r\n\t\tfor k in range(len(PAMSequences[2][i])): # For reverse sequences, and follows same logic as forward. \r\n\t\t\tif k == 0:\r\n\t\t\t\tf.write(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\tprint(PAMSequences[2][i][k]) # Prints the reverse sequences with the corresponding positions. \r\n\t\t\ta = str(PAMSequences[2][i][k]) # Changes the integer to string characters, allowing for the values to join.\r\n\t\t\tb = ''.join(a)\r\n\t\t\tf.write(b) # Write all of the reverse sequences onto the text file with their positions. \r\n\t\t\tf.write('\\n')\r\n\tf.close() # Close the file.\r",
"def convert_chn_text(detail=True):\n p = {\n \"data_path\": \"../data/data_literature\",\n \"output_dir\": \"../data/converted_data\"\n }\n if detail:\n gen_params_info(p)\n\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n files = os.listdir(p[\"data_path\"])\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n file_path = \"%s/%s\" % (p[\"data_path\"], file_name)\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], file_name)\n fh_in = codecs.open(filename=file_path, mode=\"r\", encoding='utf8')\n fh_out = codecs.open(filename=out_file_path, mode=\"w\", encoding='utf8')\n line_idx = 1\n verb = \"\"\n for line in fh_in:\n line = line.lstrip()\n if line.find(\"\\t\") < 0:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n items = line.split(\"\\t\")\n if len(items) != 4:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO 4 TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n frame_id = items[0]\n if frame_id.find(\".\") >= 0:\n frame_id = frame_id.split(\".\")[0]\n verb = items[2].strip()\n left_sent = items[1].strip()\n right_sent = items[3].strip()\n out_line = \"%s\\t%s\\t%s\\t%s\"\\\n % (frame_id, left_sent, verb, right_sent)\n print(out_line, file=fh_out)\n\n line_idx += 1\n\n fh_in.close()\n fh_out.close()",
"def _get_and_build_text_structure(self):\n return Text_structure(self.filename, self)",
"def main(self):\n root = etree.Element(\"OpenSCENARIO\")\n self.get_header(root)\n self.get_parameter_declarations(root)\n etree.SubElement(root, \"CatalogLocations\")\n self.get_road_network(root)\n self.get_entities(root)\n storyboard = etree.SubElement(root, \"Storyboard\")\n self.get_init(storyboard)\n story = etree.SubElement(storyboard, \"Story\")\n story.set(\"name\", \"OSC Generated Story\")\n act = etree.SubElement(story, \"Act\")\n act.set(\"name\", \"OSC Generated Act\")\n self.get_maneuvers(act)\n self.get_story_start_trigger(act)\n self.get_story_stop_trigger(act)\n self.get_end_eval_criteria(storyboard)\n\n generated_xml = etree.tostring(root)\n self.write_xosc(generated_xml)",
"def create_travel_model_input_file(self,\r\n config,\r\n year,\r\n zone_set,\r\n datasets,\r\n tm_input_file_name=\"tm_input.txt\",\r\n delimiter = '\\t',\r\n ):\r\n tm_config = config['travel_model_configuration']\r\n if not tm_config.has_key('travel_model_base_directory') and \\\r\n tm_config.has_key('generic_directory'):\r\n tm_config['travel_model_base_directory'] = tm_config['generic_directory']\r\n tm_input_file_name = 'y%stazdata.xls' % year\r\n\r\n urbansim_to_tm = tm_config['urbansim_to_tm_variable_mapping']\r\n if 'DataTable' in urbansim_to_tm:\r\n datatable = urbansim_to_tm['DataTable']\r\n else:\r\n datatable = \"TAZ Data Table\"\r\n if 'JoinField' in urbansim_to_tm:\r\n joinfield = urbansim_to_tm['JoinField']\r\n else:\r\n joinfield = 'ID'\r\n \r\n variable_mapping = urbansim_to_tm['variable_mapping']\r\n \r\n variable_list = []\r\n column_name = []\r\n for variable in variable_mapping:\r\n variable_list.append(variable[0])\r\n column_name.append(variable[1])\r\n \r\n logger.log_status('variable_list: %s' % variable_list)\r\n logger.log_status('column_name: %s' % column_name)\r\n\r\n zone_set.compute_variables(variable_list)\r\n variable_short_name = [VariableName(x).get_alias() for x in variable_list]\r\n \r\n tm_input_data_dir = os.path.join(tm_config['travel_model_base_directory'], tm_config[year]['data_exchange_dir'])\r\n if not os.path.exists(tm_input_data_dir):\r\n os.makedirs(tm_input_data_dir)\r\n\r\n input_file = os.path.join(tm_input_data_dir, tm_input_file_name)\r\n\r\n logger.log_status('write travel model input file to directory: %s' % tm_input_data_dir)\r\n rows = zone_set.size()\r\n cols = len(variable_short_name)\r\n data = zeros(shape=(rows,cols))\r\n for i in range(cols):\r\n this_column=zone_set.get_attribute(variable_short_name[i])\r\n data[:,i] = this_column\r\n \r\n header = column_name\r\n self._update_travel_model_data_file(config=tm_config, \r\n data=data, \r\n header=header, \r\n filepath=input_file, \r\n datatable=datatable, \r\n joinfield=joinfield, \r\n delimiter=delimiter,\r\n year=year,\r\n zone_set=zone_set\r\n )",
"def output(self, out):\n res = \"# File: \" + out + \"\\n# NFA\\n# Q_ - the set of states\\n\"\n for q in self.states:\n res += q + ' '\n res = res[0:-1]\n res += \"\\n# Sigma_ the alphabet\\n\"\n for a in self.alphabet:\n res += a + ' '\n res = res[0:-1]\n res += '\\n# q_0_ the start state\\n' + self.q_0 + \"\\n# F_ the set of accept states\\n\"\n for f in self.final:\n res += f + ' '\n res = res[0:-1]\n res += \"\\n# delta_ the transition function\\n\"\n for x in self.transition:\n splitted = list(str(x).split(','))\n res += splitted[0] + \" \" + splitted[1]\n for i in self.transition[x]:\n res += \" \" + i\n res += '\\n'\n f = open(out, 'w')\n f.write(res)\n f.close()"
] |
[
"0.6127326",
"0.6039715",
"0.59984165",
"0.58013976",
"0.578428",
"0.57156134",
"0.5675818",
"0.5628369",
"0.557599",
"0.5498544",
"0.5491181",
"0.5416907",
"0.5413934",
"0.5403777",
"0.53988117",
"0.5386455",
"0.5377619",
"0.5375378",
"0.5355942",
"0.535454",
"0.53346777",
"0.53324646",
"0.5323732",
"0.53092575",
"0.5307906",
"0.52597874",
"0.52513915",
"0.52384937",
"0.5233702",
"0.51960474"
] |
0.68856895
|
0
|
Makes the output directory if it does not exist.
|
def _make_output_directory(output_dir: str) -> None:
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir)
logging.info(f"output directory does not exist, made '{output_dir}'")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))",
"def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)",
"def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass",
"def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n pass\n os.makedirs(OUTDIR, exist_ok=True)",
"def ensure_exists(output_dir):\n try:\n makedirs(output_dir)\n except OSError:\n if not isdir(output_dir):\n raise",
"def create_dir(output_path):\n if not os.path.exists(output_path) and is_directory(output_path):\n os.makedirs(output_path)",
"def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)",
"def make_sure_path_exists(out_path):\n try:\n os.makedirs(out_path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n print \"Errors in output folder path! please change the output path or analysis name\\n\"\n exit()",
"def ensure_out_dir(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)",
"def create_out_dir(out): \n out_path = os.path.join(out,out_dir_name)\n try:\n os.stat(out_path)\n except:\n os.mkdir(out_path)",
"def make_output_dir(directory):\r\n if os.path.exists(directory):\r\n try:\r\n shutil.rmtree(directory)\r\n except OSError:\r\n print(\"[SETUP] ERROR: Removing the existing output directory failed\")\r\n return False\r\n else:\r\n print(\"[SETUP] STATUS: Existing output directory removed\")\r\n\r\n try:\r\n os.mkdir(directory)\r\n except OSError:\r\n print(\"[SETUP] ERROR: Creation of the output directory failed\")\r\n return False\r\n else:\r\n print(\"[SETUP] STATUS: Successfully created output directory\")\r\n return True",
"def setup(self, newdir=None):\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n if newdir:\n _new = os.path.join(self.output_path, newdir)\n if not os.path.exists(_new):\n os.makedirs(_new)",
"def __setup_output_directory(self):\n print('Setting up output directory')\n time_stamp = datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")\n self.output_path = os.path.join(self.output_base_path, '%s_%s' % (self.execution_name, time_stamp))\n print('- Creating output directory: %s' % self.output_path)\n os.makedirs(self.output_path)\n print('- Output directory created')",
"def create_output_dir(output_dir, dir_name):\n try:\n os.mkdir(os.path.join(output_dir, dir_name))\n except OSError:\n print(os.path.join(output_dir, dir_name) + \" exits... :(\")",
"def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)",
"def SetupOutDir(out_dir):\n logging.info('entering ...')\n assert re.match(r'^[a-zA-Z_\\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir\n\n if os.path.exists(out_dir):\n subprocess.check_call(['rm', '-rf', out_dir])\n os.mkdir(out_dir)\n logging.info('... done')",
"def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()",
"def cleanOutputDir(output):\n if os.path.exists(output) and os.path.isdir(output):\n shutil.rmtree(output)",
"def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path",
"def create_output_dir(self, cfg: dict) -> str:\n output_dir = cfg.get(\"output\").get(\"output_dir\")\n time_sfx = cfg.get(\"output\").get(\"time_suffix\", True)\n if not os.path.isabs(output_dir):\n output_dir = os.path.join(self.repo_path, output_dir)\n subdir = self.project_name\n if time_sfx:\n cur_time = get_cur_time_str()\n subdir = f\"{subdir}_{cur_time}\"\n output_dir = os.path.join(output_dir, subdir) # type: str\n if check_dir(output_dir, make_if_not=True):\n logger.info(\"Results will be in {}\".format(output_dir))\n else:\n exit(ErrorCode.PATH_ERROR)\n return output_dir",
"def getOutputDir():\n directory = os.path.join(Configurations.getProjectRootDir(), OUTPUT_DIR_NAME)\n if not os.path.exists(directory):\n logger.warning('Directory %s not exist, CREATE!', directory)\n os.makedirs(directory)\n\n return directory",
"def check_out_dir_exists(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)",
"def create_output_dir(output_dir):\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n for folder in [CHECKPOINT_DIR, LOG_DIR]:\n folder_path = os.path.join(output_dir, folder)\n if not os.path.isdir(folder_path):\n os.mkdir(folder_path)",
"def __mkdir(self, output_directory):\n try:\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n return True\n except Exception as e:\n print e\n return False",
"def make_output_dir(experiment_dir, identifier):\n output_dir = Path(experiment_dir, identifier).resolve()\n output_dir.mkdir(parents=True, exist_ok=True)\n return output_dir",
"def __init__(self, output_dir: str):\n self.output_dir = output_dir\n makedirs(self.output_dir, exist_ok=True)",
"def prepDir(path=None):\n if path:\n if os.path.exists(path):\n return path\n else:\n os.makedirs(path)\n else:\n # Do something innocent when no path is provided\n path = tempfile.mkdtemp(prefix='XEPs_')\n print \"creating {} for output\".format(path)\n return path",
"def setOutputDir(self,outdir):\n\n logger = self.logger\n runstring = self.runstring\n force = self.force\n\n logger.info(runstring + \"Output directory is [%s}\"%outdir)\n\n # Checks for existence and deletes if --force option is set\n if os.path.exists(outdir):\n\n if not force:\n msg = runstring + \"Old output directory exists. Use the --force option to overwrite or remove [%s}\"%outdir\n logger.error(msg)\n exit(msg)\n else:\n try:\n logger.info(runstring + \"Removing old output directory [%s}\"%outdir)\n shutil.rmtree(outdir) \n except OSError as err:\n msg = runstring + \"Error removing existing output directory [%s]\"%err\n logger.error(msg)\n exit(msg)\n\n\n # Creates the output directory and sets permissions to 0755\n try:\n logger.info(runstring + \"Making output directory is [%s}\"%outdir)\n os.mkdir( outdir, 0775 );\n except OSError as err:\n msg = runstring + \"Error making output directory [%s]\"%err\n logger.error(msg)\n exit(msg)\n\n return outdir",
"def clean_dirs(output_dir):\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)",
"def setOutputDir(self, outputdir):\n self.outputdir = outputdir\n if not os.path.isdir(outputdir):\n os.makedirs(outputdir)"
] |
[
"0.83254516",
"0.82470155",
"0.7971942",
"0.79512066",
"0.7879319",
"0.7845083",
"0.772621",
"0.77161586",
"0.7657744",
"0.7641563",
"0.74298275",
"0.74211967",
"0.7351358",
"0.7329179",
"0.73192096",
"0.7261498",
"0.7255115",
"0.7237214",
"0.71339095",
"0.71255136",
"0.7116088",
"0.71126056",
"0.70461106",
"0.7035215",
"0.6981501",
"0.69661784",
"0.6941854",
"0.69302833",
"0.6900312",
"0.6897202"
] |
0.8270133
|
1
|
r"""Writes the file content to the output path as a text file.
|
def _write_file(output_path: str, file_content: Iterable[str]) -> None:
with open(output_path, "w+", encoding="utf-8") as f:
f.writelines(file_content)
logging.info(f"wrote to '{output_path}'")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()",
"def write_text_tofile(text):\n try:\n with open(os.path.join(script_dir, 'output_file.txt'), 'a') as output:\n output.write(text + '\\n')\n except:\n pass",
"def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))",
"def writeText(outputText, fileName):\n with open(fileName,\"w\") as fileObject:\n fileObject.write(outputText)",
"def write_output_file(self, xml_text, xml_file):\n xml_fo = open(xml_file, 'w')\n xml_fo.write(xml_text+'</xml>')\n xml_fo.close()\n return",
"def _write_txt(\n output_path, records\n):\n output_path.write_text(_records_to_string(records))",
"def to_output_file(self, content):\n self.__log(f'Starting to write response content to output file.')\n if self.output_file_exists() and not self.config['FORCE_OVERWRITE']:\n self.__log(f'Cannot write to file. Selected output file exists and FORCE_OVERWRITE is disabled.', 'error')\n raise FileExistsError\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' \\\n + self.options['image_format'].lower()\n with open(file, 'w') as f:\n f.writelines(content)\n self.__log(f'Successfully wrote response content to \"{file}\".', 'success')",
"def txt_file_writer(path):\n return open(path, 'w', encoding=cfg.ENCODING)",
"def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))",
"def write_content_file(file_content):\n\n f = open(workingfile, \"w\")\n if hasattr(file_content, \"decode\"):\n f.write(str(file_content.decode(\"utf-8\")))\n else:\n f.write(str(file_content))\n f.close()",
"def write_txt(data, out_path, type=\"w\"):\n with open(out_path, type) as f:\n f.write(data.encode(\"utf-8\"))",
"def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)",
"def write_to_file(self, content):\n try:\n with open(self.full_path_to_file, \"wb\") as fp:\n fp.write(content)\n except PermissionError:\n logging.error(\n \"Conversion cannot be performed. Permission denied for this directory\"\n )\n sys.exit()\n self.logger.info(\"News has been successfully converted\")",
"def output(content, filename=TARGET, ext=\".txt\", mode=\"print\", charlimit=0, print_in_console=False, overwrite=True):\n\n if not os.path.exists(OUTPUT):\n os.makedirs(os.path.abspath(OUTPUT))\n else:\n if os.path.exists(OUTPUT+\"\\\\\"+filename+ext):\n if overwrite:\n print filename+ext + \" already exists! Overwriting.\"\n else:\n print filename+ext + \" already exists! Terminating write operation.\"\n return # Terminate function: Avoid overwrite.\n if charlimit > 0:\n content = content[:charlimit]\n\n f = open(OUTPUT + \"\\\\\" + filename + ext, 'w')\n if mode == \"write\":\n f.write(content)\n elif mode == \"print\":\n print >> f, content\n elif mode == \"savetxt\":\n savetxt(OUTPUT + \"\\\\\" + filename + ext, content)\n\n if print_in_console:\n print filename + ext\n print content",
"def write_file(filename=\"\", text=\"\"):\n with open(filename, \"w\") as f:\n return(f.write(text))",
"def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()",
"def write_output(self, output_path, output_filename):\n self.output_file = output_path + '/' + output_filename\n if os.path.isfile(self.output_file + '.txt'): # Creación del archivo txt de salida.\n os.remove(self.output_file + '.txt')\n file = open(self.output_file + '.txt', \"x\")\n\n self.parse_html() # Obtiene los html de entrada.\n file.write(\"############################\\n\")\n file.write(\"# ISAMI VERSION: v11.1.0 #\\n\")\n file.write(\"# INITIATION LUG #\\n\")\n file.write(\"# ISAMI_LUG VERSION: v1.0 #\\n\")\n file.write(\"############################\\n\")\n for id in self.parsed_html_dic: # Escribe la salida en el txt con el nombre del caso y kt correspondiente.\n file.writelines('-----------------------------------\\n')\n header = id + \"\\n\"\n file.writelines(header)\n file.writelines('-----------------------------------\\n')\n tables = self.read_tables(self.parsed_html_dic[id])\n info = tables[0]\n for i in info:\n file.writelines(i + \" = \" + str(info[i]) + \"\\n\")\n kt = self.find_kt(self.parsed_html_dic[id])\n file.writelines(\" Kt = \" + str(kt) + \"\\n\")\n file.close()",
"def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w') as f:\n return f.write(text)",
"def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w') as fl:\n wr = fl.write(text)\n return wr",
"def write_txt_file(title, abstract, f_out):\n\n print(\n '*' * 40,\n '\\n',\n '[Title] {}'.format(title),\n '\\n',\n '[Abstract] {}'.format(abstract),\n file=f_out\n )",
"def write_output(content, dir_to_file):\n\n if not dir_to_file:\n dir_to_file = '{0}output-{1}'.format(dir_to_file, uuid.uuid4())\n\n f = open(dir_to_file, 'a')\n f.write(content)\n f.close()\n\n log.info('function: {} dir_to_file: {}'.format('write_output', dir_to_file))\n\n return dir_to_file",
"def _write_file(self, filename, content, mode=None):\n with open(filename, 'w') as fp:\n fp.write(dedent(content).strip())\n fp.write('\\n')\n\n if mode is not None:\n os.chmod(filename, mode)",
"def write_to_file(self, filename: str) -> None:",
"def write_text_file(path: Path, data: str) -> None:\n path.write_text(data, encoding='utf-8')",
"def write_file(filename=\"\", text=\"\"):\n with open(filename, mode=\"w\", encoding=\"utf-8\") as m:\n return m.write(text)",
"def write_to(self, filepath):\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')",
"def write_file(filename, content):\n codecs.open(filename, \"w\", encoding='utf-8').writelines(content)",
"def write_to_file(output, test_case_name, path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))",
"def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()",
"def write_file(content, file_path, mode='w', encoding='utf-8'):\n with codecs.open(file_path, mode, encoding=encoding) as fid:\n fid.write(content)"
] |
[
"0.7704573",
"0.7301734",
"0.72312963",
"0.71394956",
"0.7093242",
"0.69964176",
"0.6989156",
"0.69638515",
"0.6942887",
"0.6878225",
"0.68769836",
"0.6869035",
"0.6862951",
"0.68133956",
"0.6745909",
"0.6725797",
"0.670779",
"0.6668074",
"0.6640407",
"0.6637989",
"0.6637467",
"0.6633644",
"0.66331714",
"0.66292727",
"0.66163033",
"0.66001725",
"0.6596474",
"0.65861934",
"0.657655",
"0.6568201"
] |
0.73362166
|
1
|
Return the canonical path to /etc/rc.local or an equivalent shell script that gets executed during boot up. The last component in the path must not be a symlink; other components may be.
|
def _get_rc_local_path( self ):
# might be a symlink but prepend_remote_shell_script doesn't work with symlinks
return sudo( 'readlink -f /etc/rc.local' )
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)",
"def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")",
"def determine_usr_bin():\n if git_install_requested():\n projects_yaml = config('openstack-origin-git')\n projects_yaml = git_default_repos(projects_yaml)\n return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')\n else:\n return '/usr/bin'",
"def _get_reporoot():\n from os import path\n import acorn\n medpath = path.abspath(acorn.__file__)\n return path.dirname(path.dirname(medpath))",
"def home():\n if sys.prefix == sys.exec_prefix:\n return sys.prefix\n else:\n return ':'.join((sys.prefix, sys.exec_prefix))",
"def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")",
"def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"",
"def get_executable_path(executable):\n\n if os.name == 'posix':\n return '{0}/bin/{1}'.format(VIRTUALENV, executable)\n else:\n return '{0}\\\\Scripts\\\\{1}'.format(VIRTUALENV, executable)",
"def getScriptPath():\n\treturn os.path.dirname(os.path.realpath(sys.argv[0]))",
"def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path",
"def get_installation_path():\n file_abs_path = os.path.abspath(__file__)\n real_file_abs_path = os.path.realpath(file_abs_path)\n return real_file_abs_path[:real_file_abs_path.find('/node')]",
"def _get_config_path():\n return os.path.join(os.path.expanduser('~'))",
"def absolute_path(path):\n path = re.sub('~', os.environ['HOME'], str(path))\n if path[0] != '/':\n path = str(sh.pwd()).strip() + '/' + path\n return path",
"def shell_path() -> str:\n try:\n return os.environ[\"PATH\"]\n except KeyError:\n raise ShellError(1, \"Environment symbol `PATH` is not set\")",
"def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))",
"def get_home():\n try:\n return str(Path.home())\n except Exception:\n return None",
"def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]",
"def _fast_get_system_executable(self):\n if self.real_prefix or (\n self.base_prefix is not None and self.base_prefix != self.prefix\n ): # if this is a virtual environment\n if self.real_prefix is None:\n base_executable = getattr(sys, \"_base_executable\", None) # some platforms may set this to help us\n if base_executable is not None: # noqa: SIM102 # use the saved system executable if present\n if sys.executable != base_executable: # we know we're in a virtual environment, cannot be us\n if os.path.exists(base_executable):\n return base_executable\n # Python may return \"python\" because it was invoked from the POSIX virtual environment\n # however some installs/distributions do not provide a version-less \"python\" binary in\n # the system install location (see PEP 394) so try to fallback to a versioned binary.\n #\n # Gate this to Python 3.11 as `sys._base_executable` path resolution is now relative to\n # the 'home' key from pyvenv.cfg which often points to the system install location.\n major, minor = self.version_info.major, self.version_info.minor\n if self.os == \"posix\" and (major, minor) >= (3, 11):\n # search relative to the directory of sys._base_executable\n base_dir = os.path.dirname(base_executable)\n for base_executable in [\n os.path.join(base_dir, exe) for exe in (f\"python{major}\", f\"python{major}.{minor}\")\n ]:\n if os.path.exists(base_executable):\n return base_executable\n return None # in this case we just can't tell easily without poking around FS and calling them, bail\n # if we're not in a virtual environment, this is already a system python, so return the original executable\n # note we must choose the original and not the pure executable as shim scripts might throw us off\n return self.original_executable",
"def get_remote_environment_path(node: Node,\n path: Optional[str]) -> str:\n if path is None:\n path = node.run(\"printenv IDACT_CONFIG_PATH || echo {}\".format(\n DEFAULT_REMOTE_ENVIRONMENT_PATH))\n\n path = node.run(\"readlink -vf {}\".format(path))\n if not path:\n raise RuntimeError(\"Unable to determine remote config path.\")\n\n return path",
"def getssh():\n return Path.home() / \".ssh\"",
"def get_hookscript_path ( self ):\n return self.hook_script_fspath",
"def cmdpath(self):\n return os.system('pwd')",
"def get_executable_path(target=False):\n instance = get_ctx_instance(target=target)\n initial_executable_path = \\\n instance.runtime_properties.get('executable_path')\n executable_path = initial_executable_path\n if not initial_executable_path:\n terraform_config = get_terraform_config(target=target)\n executable_path = terraform_config.get('executable_path') or \\\n os.path.join(get_node_instance_dir(target=target), 'terraform')\n if initial_executable_path and \\\n not os.path.exists(initial_executable_path) \\\n and is_using_existing(target=target):\n # executable path set by relationship precreate operation\n raise RecoverableError(\n \"If executable_path {} does not exist and there is no \"\n \"executable_path in terraform_config there is a need \"\n \"to retry and wait for file system to sync.\".format(\n initial_executable_path\n ))\n if not os.path.exists(executable_path) and \\\n is_using_existing(target=target):\n node = get_ctx_node(target=target)\n terraform_config = node.properties.get('terraform_config', {})\n executable_path = terraform_config.get('executable_path')\n if not initial_executable_path:\n instance.runtime_properties['executable_path'] = executable_path\n return executable_path",
"def scriptdir(follow_symlinks=True):\n if getattr(sys, 'frozen', False):\n path_ = path.abspath(scriptdir)\n else:\n path_ = getabsfile(scriptdir)\n\n if follow_symlinks:\n path_ = path.realpath(path_)\n\n return path.dirname(path_)",
"def _getSshDir():\n return f'{Path.home()}/.ssh'",
"def default_path():\n return os.path.join(os.environ.get('OVERRIDE_ETC', '/etc'), 'auth')",
"def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path",
"def _get_base_command(self):\n import inspect\n import os\n # get current script directory path. We are in /an/unknown/path/kalliope/core\n cur_script_directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n # get parent dir. Now we are in /an/unknown/path/kalliope\n parent_dir = os.path.normpath(cur_script_directory + os.sep + os.pardir)\n # we add the kalliope.py file name\n real_entry_point_path = parent_dir + os.sep + KALLIOPE_ENTRY_POINT_SCRIPT\n # We test that the file exist before return it\n logger.debug(\"Real Kalliope.py path: %s\" % real_entry_point_path)\n if os.path.isfile(real_entry_point_path):\n crontab_cmd = \"python %s start --brain-file %s --run-synapse \" % (real_entry_point_path,\n self.brain.brain_file)\n return crontab_cmd\n raise IOError(\"kalliope.py file not found\")",
"def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb",
"def get_root_path(path):\n if not path:\n path = __opts__.get(\"lxc.root_path\", DEFAULT_PATH)\n return path"
] |
[
"0.6142366",
"0.61250097",
"0.6099698",
"0.6046178",
"0.6010571",
"0.6000824",
"0.59402305",
"0.5883539",
"0.58717144",
"0.58534086",
"0.58243835",
"0.58120936",
"0.57910746",
"0.57867146",
"0.5785221",
"0.5725474",
"0.57024145",
"0.56905836",
"0.5689721",
"0.56822443",
"0.5677786",
"0.5672075",
"0.56570315",
"0.5651427",
"0.563975",
"0.5622318",
"0.56179774",
"0.56131643",
"0.5597398",
"0.5595001"
] |
0.80080163
|
0
|
Insert the given script into the remote file at the given path before the first script line. See prepend_shell_script() for a definition of script line.
|
def _prepend_remote_shell_script( self, script, remote_path, **put_kwargs ):
with closing( StringIO( ) ) as out_file:
with closing( StringIO( ) ) as in_file:
get( remote_path=remote_path, local_path=in_file )
in_file.seek( 0 )
prepend_shell_script( '\n' + script, in_file, out_file )
out_file.seek( 0 )
put( remote_path=remote_path, local_path=out_file, **put_kwargs )
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pre_start_script_tmp_sh(tmp_path: Path) -> Path:\n tmp_file = tmp_path / \"prestart.sh\"\n with open(Path(tmp_file), \"x\") as f:\n f.write('echo \"Hello World, from a temporary pre-start shell script\"\\n')\n return Path(tmp_file)",
"def insert(self, line, where=0):\n self.buffer.insert(where, line)",
"def insert_first_line(filename, string):\n try:\n import fileinput\n for line in fileinput.input([filename], inplace=True):\n if fileinput.isfirstline():\n print string\n print line,\n except Exception as e:\n print('\\nError adding specified string to file {}: {}.'.format(filename, e))",
"def prepend_path(path, paths):\n\n if path in paths: paths.remove(path)\n paths.insert(0, path)",
"def _remoteScript(self, source_script):",
"def Prepend(filepath, text):\n file_data = text\n if os.path.exists(filepath):\n file_data += open(filepath).read()\n f = open(filepath, 'w')\n f.write(file_data)\n f.close()",
"def prepend_line(file_name, line):\n # define name of temporary dummy file\n dummy_file = file_name + '.bak'\n # open original file in read mode and dummy file in write mode\n with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:\n # Write given line to the dummy file\n write_obj.write(line + '\\n')\n # Read lines from original file one by one and append them to the dummy file\n for line in read_obj:\n write_obj.write(line)\n # remove original file\n os.remove(file_name)\n # Rename dummy file as the original file\n os.rename(dummy_file, file_name)",
"def line_prepender(file_path: str, line: str) -> None:\n with open(file_path, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(line.rstrip('\\r\\n') + '\\n' + content)",
"def script(self, code):\r\n LOG(\"Executing script \" + repr(code))\r\n cmd = MsgHelper.createMessage(Messages.CMD_SCRIPT)\r\n cmd[Messages.FIELD_SCRIPT] = code\r\n cmd[Messages.FIELD_FORCE] = True\r\n self.mailbox.push( cmd, high_priority = False )",
"def run_remote_script(self, script_file, args=None, log_error=True, additional_files=None):\n script_name = os.path.basename(script_file)\n self.__sftp_client.put(script_file, script_name)\n if not args:\n args = []\n return self.run_remote_command(\n [\"/bin/bash\", \"--login\", script_name] + args, log_error=log_error, additional_files=additional_files\n )",
"def _adjust_shebang(self, script, outfile):\n # Always open the file, but ignore failures in dry-run mode --\n # that way, we'll get accurate feedback if we can read the\n # script.\n try:\n with open(script, \"r\") as stream:\n firstline = stream.readline()\n match = build_scripts.first_line_re.match(firstline)\n if match:\n post_interp = match.group(1) or ''\n log.info(\"copying and adjusting %s -> %s\", script,\n self.build_dir)\n if not self.dry_run:\n with open(outfile, \"w\") as outstream:\n # write script to target file\n outstream.write(\"#!%s%s\\n\" % (self.executable,\n post_interp))\n outstream.write(stream.read())\n return True\n except IOError:\n if not self.dry_run:\n raise\n return False",
"def relative_script(lines):\n activate = (\n \"import os; \"\n \"activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); \"\n \"exec(compile(open(activate_this).read(), activate_this, 'exec'), { '__file__': activate_this}); \"\n \"del os, activate_this\"\n )\n # Find the last future statement in the script. If we insert the activation\n # line before a future statement, Python will raise a SyntaxError.\n activate_at = None\n for idx, line in reversed(list(enumerate(lines))):\n if line.split()[:3] == [\"from\", \"__future__\", \"import\"]:\n activate_at = idx + 1\n break\n if activate_at is None:\n # Activate after the shebang.\n activate_at = 1\n return lines[:activate_at] + [\"\", activate, \"\"] + lines[activate_at:]",
"def populateScript(self):\n filePath = pm.fileDialog2(fileMode=1,\n startingDirectory=self.startDir,\n fileFilter=' Post Script .py (*%s)' % \".py\")\n if not filePath:\n return\n if not isinstance(filePath, string_types):\n filePath = filePath[0]\n self.gtUIInst.script_lineEdit.setText(filePath)",
"def execute_script(self, script, enterpreter='/bin/sh'):\n destination = '/tmp/' + ''.join(\n random.choice(string.lowercase) for i in range(16))\n\n self.upload(script, destination)\n self.execute('%s %s' % (enterpreter, destination))\n self.execute('rm %s' % destination)",
"def pre_start_script_tmp_py(tmp_path: Path) -> Path:\n tmp_file = shutil.copy(Path(pre_start_module.__file__), tmp_path)\n return Path(tmp_file)",
"def insert_rsync_marker( path, target_dir ):\n rel_cutoff = len( split_all( target_dir ) )\n parts = split_all( path )\n parts.insert( rel_cutoff, \".\" )\n return os.path.join( *parts )",
"def prepend_to(self, key, entry):\n try:\n tail = os.path.pathsep + self[key]\n except KeyError:\n tail = \"\"\n self[key] = entry + tail",
"def register_script(self, script):\n return self.conn.register_script(script)",
"def local_push_file(job_log_dir, file_path, local_config):\n dest_dir = os.path.join(local_config['path'], job_log_dir)\n dest_filename = os.path.basename(file_path)\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n\n dest_file = os.path.join(dest_dir, dest_filename)\n\n shutil.copyfile(file_path, dest_file)\n return local_config['prepend_url'] + os.path.join(job_log_dir,\n dest_filename)",
"def prepend_file(self, file_hex):\n new_file_byte_length = int(len(file_hex)/2)\n self._local_file_headers = f'{file_hex}{self._local_file_headers}'\n\n for header in self.get_central_dir_list():\n header.shift_relative_local_header_offset(new_file_byte_length)\n\n self._end_central_dir.shift_start_central_dir_start_offset(\n new_file_byte_length)",
"def run_setup_script(self, script_path):\n try:\n f = open(script_path, 'r')\n setup_script = f.read()\n # print(setup_script)\n c = self.conn.cursor()\n c.executescript(setup_script)\n except (Error, IOError) as e:\n print('[Datanase] Error:')\n print(e)",
"def updateFromScript(self, scriptPath):\n\t\tsubprocess.run([\"PowerShell\", \"-ExecutionPolicy\", \"Bypass\", \"-File\", scriptPath])",
"def pass_import_entry(path, data):\n print \"path:%r data:%r\" % (path,data)\n\tproc = Popen(['pass', 'insert', '--multiline', path], stdin=PIPE, stdout=PIPE)\n\tproc.communicate(data)\n\tproc.wait()",
"def add_to_cmd(run, batch, source, add_line, basename='xrb'):\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath) as f:\n lines = f.readlines()\n\n lines = [f'{add_line}\\n'] + lines\n with open(filepath, 'w') as f:\n f.writelines(lines)",
"def putscript(self, name, content):\n content = self.__prepare_content(content)\n code, data = (\n self.__send_command(\"PUTSCRIPT\", [name.encode(\"utf-8\"), content]))\n if code == \"OK\":\n return True\n return False",
"def insert_before(self, text, line, col):\n col = self.canonicalize_column_index(line, col)\n col_off = self.col_offs[line]\n adj_col = (col_off.get_rewritten_pos(col) -\n col_off.get_insertion_length(col))\n theline = self.lines[line]\n self.lines[line] = theline[:adj_col] + text + theline[adj_col:]\n col_off.insert(col, len(text))",
"def import_psh(self, script_path):\n if 'powershell' not in self.info['plugins']:\n self.load_plugin('powershell')\n end_strs = ['[-]', '[+]']\n out = self.run_with_output(f'powershell_import {script_path}', end_strs)\n if 'failed to load' in out:\n raise MsfRpcError(f'File {script_path} failed to load.')\n return out",
"def _define_script_command(command_name,\n parent_shell,\n bootstrap_script,\n container_path,\n scripts_path,\n script):\n script_fragment = \"\\\"{}\\\"\".format(script) if script else \"\"\n parent_shell.define_command(command_name,\n \"python \\\"{bootstrap}\\\" \"\n \"-d \\\"{container}\\\" \"\n \"-r \\\"{scripts}\\\" \"\n \"-s {script}\"\n \"\".format(bootstrap=bootstrap_script,\n container=container_path,\n scripts=scripts_path,\n script=script_fragment))",
"def run_remote_script(self, lines, file_output=False, instance=None):\n data = '\\n'.join(lines)\n return self.push_remote_text_file(input_data=data, run=True, file_output=file_output, instance=instance)",
"def script(self, script):\n\n self._script = script"
] |
[
"0.5698845",
"0.5542687",
"0.5500571",
"0.54082674",
"0.528102",
"0.52535087",
"0.52462095",
"0.5182523",
"0.5134501",
"0.507421",
"0.50361365",
"0.5027348",
"0.50166833",
"0.49855557",
"0.49790967",
"0.4978083",
"0.49382308",
"0.49119568",
"0.48901075",
"0.48578307",
"0.4856829",
"0.48461005",
"0.48246345",
"0.48233277",
"0.48110768",
"0.47976074",
"0.47948676",
"0.47935224",
"0.47905552",
"0.47878245"
] |
0.827533
|
0
|
r""" Patch /etc/environment by A) adding a list of directories to a PATH o PATHlike variable and/or B) adding other environment variables to it.
|
def _patch_etc_environment( cls, env_file, dirs=None, dirs_var='PATH', env_pairs=None ):
def parse_entry( s ):
m = cls.env_entry_re.match( s )
return m.group( 1 ), m.group( 2 )
env_file.seek( 0 )
env = dict( parse_entry( _ ) for _ in env_file.read( ).splitlines( ) )
# Do we have directories to add to a path?
if dirs is not None:
path = filter( None, env.get( dirs_var, '' ).split( ':' ) )
path.extend( dirs )
env[ dirs_var ] = ':'.join( path )
# Do we have other environment variables to write?
if env_pairs is not None:
for (k, v) in env_pairs.iteritems():
env[k] = v
env_file.seek( 0 )
env_file.truncate( 0 )
for var in sorted( env.items( ) ):
env_file.write( '%s="%s"\n' % var )
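
The code above uses Python 2 idioms (iteritems(), a list-returning filter()). The sketch below re-renders the same /etc/environment patching pattern in Python 3 as a self-contained, runnable example; the entry regex and the KEY="value" output format are assumptions standing in for cls.env_entry_re, which is defined elsewhere in the original class.

# A self-contained Python 3 sketch of the same /etc/environment patching
# pattern: parse KEY="value" entries, extend a PATH-like variable, merge in
# extra pairs, then rewrite the file in place. The regex is an assumption.
import io
import re

ENV_ENTRY_RE = re.compile(r'^\s*([^=\s]+)\s*=\s*"?(.*?)"?\s*$')


def patch_etc_environment(env_file, dirs=None, dirs_var='PATH', env_pairs=None):
    env_file.seek(0)
    env = {}
    for line in env_file.read().splitlines():
        m = ENV_ENTRY_RE.match(line)
        if m:
            env[m.group(1)] = m.group(2)
    # Do we have directories to add to a path-like variable?
    if dirs is not None:
        path = [p for p in env.get(dirs_var, '').split(':') if p]
        path.extend(dirs)
        env[dirs_var] = ':'.join(path)
    # Do we have other environment variables to write?
    if env_pairs is not None:
        env.update(env_pairs)
    env_file.seek(0)
    env_file.truncate(0)
    for key, value in sorted(env.items()):
        env_file.write('%s="%s"\n' % (key, value))


if __name__ == '__main__':
    f = io.StringIO('PATH="/usr/bin:/bin"\nLANG="C.UTF-8"\n')
    patch_etc_environment(f, dirs=['/opt/tools/bin'], env_pairs={'EDITOR': 'vim'})
    print(f.getvalue())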
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_to_env(self, path, value):\n name = [MakeEnvironArgs.CONFIG]\n for element in path:\n name.append(MakeEnvironArgs.DOT)\n name.append(element)\n self.env[''.join(name)] = value\n return self.env",
"def setUpEnvironmentVariables(basedir):\n\tif sys.platform == 'win32':\n\t\toldpath = os.environ[\"PATH\"]\n\t\tcwd = os.getcwd()\n\t\tos.environ[\"PATH\"] = oldpath + ';' + cwd + fileSeperator + basedir + fileSeperator + \"platform-tools\"\n\t\tprint os.environ[\"PATH\"]\n\telse:\n\t\tcwd = os.getcwd()\n\t\toldpath = os.environ[\"PATH\"]\n\t\tnewpath = cwd + fileSeperator + basedir + fileSeperator + \"tools:\" + fileSeperator + cwd + fileSeperator + basedir + fileSeperator + \"platform-tools\"\n\t\tos.environ[\"PATH\"] = oldpath + fileSeperator + newpath",
"def add_to_path(path):\n from fabric.contrib.files import append\n import vars\n vars = vars.Vars()\n for file in [ vars.os.default_shell_config, vars.os.default_loginshell_config ]:\n append(file, \"export PATH=$PATH:\"+path, use_sudo=True)",
"def update_env_in_script(fn, names):\n with open(fn) as ifs:\n content = ifs.read()\n content = _prepend_env_paths(content, names)\n with open(fn, 'w') as ofs:\n ofs.write(content)",
"def append_path_env(self, path):\n self._cmd_runner.append_to_env_var('PATH', os.path.expanduser(path), sep=os.pathsep)",
"def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)",
"def patch_environment(**kwargs):\n for key, value in kwargs.items():\n os.environ[key.upper()] = str(value)\n\n yield\n\n for key in kwargs:\n if key.upper() in os.environ:\n del os.environ[key.upper()]",
"def _prepend_env_paths(content, names):\n export_env_vars = ['export %(k)s=%(v)s:${%(k)s}' %dict(\n k=name, v=os.environ.get(name, '')) for name in names]\n return '\\n'.join(export_env_vars + [content])",
"def update_environ():\n\n # Environment variables to set.\n BASE = os.getcwd()\n PLUGINS = os.path.join(BASE, 'lib')\n RESOURCES = os.path.join(BASE, 'res')\n MODELS = os.path.join(RESOURCES, 'models')\n\n # Set the vaue to '' to set the var to ''.\n # Anything else will be added to current var value.\n minimapper_env = {\n 'GAZEBO_RESOURCE_PATH': RESOURCES,\n 'GAZEBO_MODEL_PATH': MODELS,\n 'GAZEBO_PLUGIN_PATH': PLUGINS,\n 'GAZEBO_MODEL_DATABASE_URI': None\n }\n\n # Conditionally set environment variables.\n env = os.environ.copy()\n for key, val in minimapper_env.items():\n if val is None:\n env[key] = ''\n elif key not in env:\n env[key] = val\n elif key in env and val not in env[key]:\n env[key] = val + ':' + env[key]\n\n return env",
"def __setitem__(self, key, item):\n super(EnvironmentVariables, self).__setitem__(key, item)\n os.environ[key] = item",
"def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]",
"def prepend_path_env(self, path):\n self._cmd_runner.prepend_to_env_var('PATH', os.path.expanduser(path), sep=os.pathsep)",
"def append(env, env_b):\n # todo: should this be refactored to \"join\" or \"extend\"\n # todo: this function name might also be confusing with \"merge\"\n env = env.copy()\n for variable, value in env_b.items():\n for path in value.split(\";\"):\n if not path:\n continue\n\n lib.append_path(env, variable, path)\n\n return env",
"def update(self, env_obj):\n if env_obj:\n if isinstance(env_obj, EnvValues):\n for package_name, env_vars in env_obj.data.items():\n for name, value in env_vars.items():\n if isinstance(value, list):\n value = copy.copy(value) # Aware of copying by reference the list\n self.add(name, value, package_name)\n # DepsEnvInfo. the OLD values are always kept, never overwrite,\n elif isinstance(env_obj, DepsEnvInfo):\n for (name, value) in env_obj.vars.items():\n name = name.upper() if name.lower() == \"path\" else name\n self.add(name, value)\n else:\n raise ConanException(\"unknown env type: %s\" % env_obj)",
"def prepend_environment_variable(parent, key, value):\n os.environ[key] = \"{0}{1}{2}\".format(str(value),\n os.pathsep,\n os.environ.get(key) or \"\")\n\n if parent:\n parent.prepend_environment_variable(key, value)",
"def change_environment_variables():\n values = load('environment.yaml')\n\n for key in values.keys():\n os.environ[key] = values[key]\n\n info(f'Changed environment variables to {values}')",
"def make_environment_relocatable(home_dir):\n home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)\n activate_this = os.path.join(bin_dir, \"activate_this.py\")\n if not os.path.exists(activate_this):\n _LoggerInstance.fatal(\n \"The environment doesn't have a file %s -- please re-run virtualenv \" \"on this environment to update it\",\n activate_this,\n )\n fixup_scripts(home_dir, bin_dir)\n fixup_pth_and_egg_link(home_dir)",
"def test_environment_patchtest(self):\n self.env = patch.dict('os.environ', {'hello': 'world'})\n with self.env:\n self.assertEqual(os.environ['hello'], 'world')",
"def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = 
self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n 
self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # 
Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0",
"def _update_environment(config: VshConfig) -> Dict:\n env = {k: v for k, v in os.environ.items()}\n # VSH specific variable to set venv name\n env[package_metadata['name'].upper()] = config.venv_name\n\n # Expected venv changes to environment variables during an activate\n env['VIRTUAL_ENV'] = str(config.venv_path)\n env['PATH'] = ':'.join([str(config.venv_path / 'bin')] + env['PATH'].split(':'))\n\n # Updates to shell prompt to show virtual environment info\n shell = Path(env.get('SHELL') or '/bin/sh').name\n disable_prompt = env.get('VIRTUAL_ENV_DISABLE_PROMPT') or None\n shell_prompt_mapping = {\n 'bash': 'PS1',\n 'sh': 'PS1',\n 'zsh': 'PROMPT'\n }\n shell_prompt_var = shell_prompt_mapping.get(shell, '')\n default_prompt = terminal.blue(\"\\\\w\") + '\\\\$ '\n prompt = env.get(shell_prompt_var, None) or default_prompt\n prompt = _escape_zero_length_codes(prompt) if shell in ['bash', 'sh'] else prompt\n if shell_prompt_var and not disable_prompt:\n env[shell_prompt_var] = prompt\n return env",
"def set_env(self, propagated_env_vars={}):\n os.environ['BUILD_ROOT'] = self.build_root\n # This is how we tell run-test.sh what set of C++ binaries to use for mini-clusters in Java\n # tests.\n for env_var_name, env_var_value in propagated_env_vars.iteritems():\n os.environ[env_var_name] = env_var_value",
"def overwrite_environment_variable(parent, key, value):\n if value is not None:\n os.environ[key] = str(value)\n elif os.environ.get(key, None):\n del os.environ[key]\n\n if parent:\n parent.overwrite_environment_variable(key, value)",
"def config(monkeypatch):\n\n monkeypatch.setenv(\"NESTOR_CONFIG_PATH\", \"/fixtures-nestor-config\")\n monkeypatch.setenv(\"NESTOR_PRISTINE_PATH\", \"/fixtures-nestor-pristine\")\n monkeypatch.setenv(\"NESTOR_WORK_PATH\", \"/fixtures-nestor-work\")",
"def fix_dot_env_file():\n # Create path to the .env file\n env_file_path = Path(\".env\")\n\n # Ensure that the .env file exists\n env_file_path.touch(exist_ok=True)\n\n # Otherwise, extract all the lines in the .env file\n env_file_lines = env_file_path.read_text().splitlines(keepends=False)\n\n # Extract all the environment variables in the .env file\n env_vars = [line.split(\"=\")[0] for line in env_file_lines]\n\n # For each of the desired environment variables, check if it exists in the .env\n # file\n env_vars_missing = [\n env_var\n for env_var in DESIRED_ENVIRONMENT_VARIABLES.keys()\n if env_var not in env_vars\n ]\n\n # Create all the missing environment variables\n with env_file_path.open(\"a\") as f:\n for env_var in env_vars_missing:\n value = \"\"\n if env_var == \"GPG_KEY_ID\":\n gpg = subprocess.Popen(\n [\"gpg\", \"--list-secret-keys\", \"--keyid-format=long\"],\n stdout=subprocess.PIPE,\n )\n grep = subprocess.Popen(\n [\"grep\", \"sec\"], stdin=gpg.stdout, stdout=subprocess.PIPE\n )\n value = (\n subprocess.check_output(\n [\"sed\", \"-E\", \"s/.*\\\\/([^ ]+).*/\\\\1/\"],\n stdin=grep.stdout,\n )\n .decode()\n .strip(\"\\n\")\n )\n gpg.wait()\n grep.wait()\n if value == \"\":\n value = input(DESIRED_ENVIRONMENT_VARIABLES[env_var])\n f.write(f'{env_var}=\"{value}\"\\n')",
"def overwrite_environment_variable(self, key, value):\n if value is not None:\n self._printer(\"$env:{0} = \\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"$env:{0} = \\\"\\\"\".format(key))",
"def modified_environ(*remove, **update):\n env = os.environ\n update = update or {}\n remove = remove or []\n\n # List of environment variables being updated or removed.\n stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n # Environment variables and values to restore on exit.\n update_after = {k: env[k] for k in stomped}\n # Environment variables and values to remove on exit.\n remove_after = frozenset(k for k in update if k not in env)\n\n try:\n env.update(update)\n [env.pop(k, None) for k in remove] # pylint: disable=expression-not-assigned\n yield\n finally:\n env.update(update_after)\n [env.pop(k) for k in remove_after] # pylint: disable=expression-not-assigned",
"def modified_environ(*remove, **update):\n env = os.environ\n update = update or {}\n remove = remove or []\n\n # List of environment variables being updated or removed.\n stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n # Environment variables and values to restore on exit.\n update_after = {k: env[k] for k in stomped}\n # Environment variables and values to remove on exit.\n remove_after = frozenset(k for k in update if k not in env)\n\n try:\n env.update(update)\n [env.pop(k, None) for k in remove] # pylint: disable=expression-not-assigned\n yield\n finally:\n env.update(update_after)\n [env.pop(k) for k in remove_after] # pylint: disable=expression-not-assigned",
"def update_from_env(d: dict, variables: List[str], inplace: bool = False):\n new_keys = {}\n for var in variables:\n new_keys[var] = os.environ[var]\n\n if inplace:\n d.update(new_keys)\n return d\n\n for key in d:\n if key not in new_keys:\n new_keys[key] = d[key]\n\n return new_keys",
"def overwrite_environment_variable(self, key, value):\n if value is not None:\n value = BashParentEnvironment._format_environment_value(value)\n self._printer(\"export {0}=\\\"{1}\\\"\".format(key, value))\n else:\n self._printer(\"unset {0}\".format(key))",
"def pupdate(self):\n try:\n tmp = self.path_list[0]\n except IndexError:\n print(\"Empty value for env variable \", self.name)\n return\n\n for p in self.path_list[1:]:\n tmp = tmp + ':' + p\n self.val = tmp"
] |
[
"0.67909825",
"0.65598273",
"0.6248789",
"0.62381274",
"0.61344135",
"0.61076623",
"0.6048537",
"0.60125136",
"0.5980316",
"0.5967851",
"0.58665335",
"0.5862674",
"0.5858224",
"0.5845886",
"0.5845145",
"0.58326834",
"0.58200264",
"0.58082944",
"0.580371",
"0.5799828",
"0.5742522",
"0.56905705",
"0.5689828",
"0.5686629",
"0.56796944",
"0.5672218",
"0.5672218",
"0.56626225",
"0.5660436",
"0.5657294"
] |
0.7579833
|
0
|
Dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items. Output is a list of sets in topological order. The first set consists of items with no dependencies; each subsequent set consists of items that depend upon items in the preceding sets. >>> toposort2({
|
def toposort2( data ):
from functools import reduce
# Ignore self dependencies.
for k, v in data.items( ):
v.discard( k )
# Find all items that don't depend on anything.
extra_items_in_deps = reduce( set.union, data.itervalues( ) ) - set( data.iterkeys( ) )
# Add empty dependences where needed
data.update( { item: set( ) for item in extra_items_in_deps } )
while True:
ordered = set( item for item, dep in data.iteritems( ) if not dep )
if not ordered:
break
yield ordered
data = { item: (dep - ordered)
for item, dep in data.iteritems( )
if item not in ordered }
assert not data, "Cyclic dependencies exist among these items:\n%s" % '\n'.join(
repr( x ) for x in data.iteritems( ) )
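
toposort2 above is written against Python 2 dict iterators (itervalues, iteritems). Below is a Python 3 rendering of the same layered topological sort as a self-contained sketch, followed by a small illustrative call; the sample dependency dict is made up for the demo, and cycles raise a ValueError instead of the original break-then-assert.

# A Python 3 rendering of the layered topological sort above. Each yielded set
# contains items whose remaining dependencies are satisfied by earlier sets.
from functools import reduce


def toposort_layers(data):
    data = {k: set(v) for k, v in data.items()}
    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Items that appear only as dependencies get an empty dependency set.
    extra = reduce(set.union, data.values(), set()) - set(data.keys())
    data.update({item: set() for item in extra})
    while data:
        ordered = {item for item, deps in data.items() if not deps}
        if not ordered:
            raise ValueError('Cyclic dependencies exist among: %r' % data)
        yield ordered
        data = {item: deps - ordered
                for item, deps in data.items()
                if item not in ordered}


if __name__ == '__main__':
    deps = {'app': {'lib', 'config'}, 'lib': {'config'}, 'config': set()}
    print(list(toposort_layers(deps)))
    # [{'config'}, {'lib'}, {'app'}]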
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def topological_sort(deps: Dict[str, Set[str]]) -> List[str]:\n # TODO: implement cycle detection\n # Let's make a deep copy of the dictionary, so that we are a good citizen and don't\n # modify the parameter\n deps = {k: set(v) for k, v in deps.items()}\n flat: List[str] = []\n while deps:\n keys_with_no_deps = {k for k, v in deps.items() if not v}\n flat += keys_with_no_deps\n deps = {k: v - keys_with_no_deps for k, v in deps.items() if k not in keys_with_no_deps}\n return flat",
"def toposort(data):\n\n\n # Ignore self dependencies.\n for k, v in data.items():\n v.discard(k)\n # Find all items that don't depend on anything.\n extra_items_in_deps = \\\n reduce(set.union, data.itervalues()) - set(data.iterkeys())\n # Add empty dependences where needed\n data.update({item:set() for item in extra_items_in_deps})\n while True:\n ordered = set(item for item, dep in data.iteritems() if not dep)\n if not ordered:\n break\n yield ordered\n data = {item: (dep - ordered)\n for item, dep in data.iteritems()\n if item not in ordered}\n assert not data, \\\n \"Cyclic dependencies exist among these items:\\n{}\".format(\n '\\n'.join(repr(x) for x in data.iteritems()))",
"def toposort(data):\n\n from functools import reduce\n\n # Ignore self dependencies.\n for k, v in data.items():\n v.discard(k)\n # Find all items that don't depend on anything.\n extra_items_in_deps = reduce(set.union, data.itervalues()) - set(data.iterkeys())\n # Add empty dependences where needed\n data.update({item:set() for item in extra_items_in_deps})\n while True:\n ordered = set(item for item, dep in data.iteritems() if not dep)\n if not ordered:\n break\n yield ordered\n data = {item: (dep - ordered)\n for item, dep in data.iteritems()\n if item not in ordered}\n\n assert not data, \"Cyclic dependencies exist among these items:\\n%s\" %\\\n '\\n'.join(repr(x) for x in data.iteritems())",
"def pipeline_dependencies_tasks(g):\n deps = dict()\n for step_name in nx.topological_sort(g):\n deps[step_name] = list(g.predecessors(step_name)) # copy list\n return deps",
"def toposort(prereqs_d):\r\n\r\n# all1 = set(prereqs_d.keys())\r\n# all2 = set()\r\n# for x, y in prereqs_d.items():\r\n# all2.update(y)\r\n# print all1.difference(all2)\r\n\r\n seq = []\r\n done = set()\r\n postreqs_d = {}\r\n for x, prereqs in prereqs_d.items():\r\n for prereq in prereqs:\r\n postreqs_d.setdefault(prereq, set()).add(x)\r\n next = set([k for k in prereqs_d if not prereqs_d[k]])\r\n while next:\r\n bases = next\r\n next = set()\r\n for x in bases:\r\n done.add(x)\r\n seq.append(x)\r\n for x in bases:\r\n for postreq in postreqs_d.get(x, []):\r\n if not prereqs_d[postreq].difference(done):\r\n next.add(postreq)\r\n if len(prereqs_d) != len(seq):\r\n raise Exception(\"Cannot sort topologically: there might be cycles, \"\r\n \"prereqs_d does not have a key for each element or \"\r\n \"some orderings contain invalid elements.\")\r\n return seq",
"def toposort2(data):\n\n # pylint: disable=W0622\n from functools import reduce\n\n # Ignore self dependencies.\n for key, value in data.items():\n value.discard(key)\n\n # Find all items that don't depend on anything.\n extra_items_in_deps = reduce(set.union,\n six.itervalues(data)) - set(six.iterkeys(data))\n\n # Add empty dependencies where needed\n # data.update({item: set() for item in extra_items_in_deps})\n # {item: word.count(item) for item in set(word)}\n # dict((item, word.count(item)) for item in set(word))\n data.update(dict((item, set()) for item in extra_items_in_deps))\n while True:\n ordered = set(item for item, dep in six.iteritems(data) if not dep)\n if not ordered:\n break\n yield ordered\n # data = {item: (dep - ordered)\n # for item, dep in data.iteritems()\n # if item not in ordered}\n data = dict((item, (dep - ordered))\n for item, dep in six.iteritems(data)\n if item not in ordered)\n\n error_format = \"Cyclic dependencies exist among these items:\\n%s\"\n assert not data, error_format % '\\n'.join(repr(x) for x in six.iteritems(data))",
"def dependency_order(self):\n seen = set()\n\n def _prune_visited(node):\n if node in seen:\n return True\n seen.add(node)\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_prune_visited):\n yield node.data",
"def get_ordered_dependency_list(self):\n # Validate the graph\n self.validate()\n # Generate the dependency list\n dep_list = []\n for rosdep_key in self:\n if self[rosdep_key]['is_root']:\n dep_list.extend(self.__get_ordered_uninstalled(rosdep_key))\n # Make the list unique and remove empty entries\n result = []\n for item in dep_list:\n if item not in result and item[1] != []:\n result.append(item)\n # Squash the results by installer_key\n squashed_result = []\n previous_installer_key = None\n for installer_key, resolved in result:\n if previous_installer_key != installer_key:\n squashed_result.append((installer_key, []))\n previous_installer_key = installer_key\n squashed_result[-1][1].extend(resolved)\n return squashed_result",
"def dependents(sent,head): # head: node address\n return sorted(chain.from_iterable(sent.nodes[head]\\\n ['deps'].values()))",
"def order (self, objects):\n # The algorithm used is the same is standard transitive closure,\n # except that we're not keeping in-degree for all vertices, but\n # rather removing edges.\n result = []\n\n if not objects:\n return result\n\n constraints = self.__eliminate_unused_constraits (objects)\n\n # Find some library that nobody depends upon and add it to\n # the 'result' array.\n obj = None\n while objects:\n new_objects = []\n while objects:\n obj = objects [0]\n\n if self.__has_no_dependents (obj, constraints):\n # Emulate break ;\n new_objects.extend (objects [1:])\n objects = []\n\n else:\n new_objects.append (obj)\n obj = None\n objects = objects [1:]\n\n if not obj:\n raise BaseException (\"Circular order dependencies\")\n\n # No problem with placing first.\n result.append (obj)\n\n # Remove all contains where 'obj' comes first,\n # since they are already satisfied.\n constraints = self.__remove_satisfied (constraints, obj)\n\n # Add the remaining objects for further processing\n # on the next iteration\n objects = new_objects\n\n return result",
"def topologicalSort(self, nodes: List[int], prerequisites: List[List[int]]) -> List[int]:\n # graph\n # prep: x ->requires set(y1, y2, ..), before take x need y1, y2, .., takes count only\n # post: y ->followup set(x1, x2, ..), \n # after take y, it is possible (only possible may not, as x_i might need others) x1, x2, ..\n prep = defaultdict(lambda: 0)\n post = defaultdict(set)\n for x, y in prerequisites:\n prep[x] += 1\n post[y].add(x)\n # schedule\n # start with all nodes requires no prerequisites\n schedule, boundary = [], [x for x in nodes if x not in prep]\n while boundary:\n y = boundary.pop()\n schedule.append(y)\n if y in post:\n xs = post.pop(y)\n for x in xs:\n prep[x] -= 1\n # all prerequisites of x are cleared \n if prep[x] == 0:\n prep.pop(x)\n boundary.append(x)\n # some nodes are impossible to complete?\n if prep:\n return []\n return schedule",
"def toposorted(infos):\n key_to_info = {}\n depends = {}\n for info in infos:\n key_to_info[info.key] = info\n depends[info.key] = []\n for info in infos:\n for after in info.after:\n after_info = key_to_info[after]\n depends[info.key].append(after_info)\n for before in info.before:\n before_info = key_to_info[before]\n depends[before_info.key].append(info)\n return topological_sort(infos, lambda info: depends[info.key])",
"def toposort_from_dependency_info(nodes, get_node_key, get_dependency_keys, can_ignore_dependency=None):\n nodes_by_key = dict()\n node_depended_on_by = dict()\n\n for node in nodes:\n key = get_node_key(node)\n if key in nodes_by_key:\n raise ValueError(\"two nodes with the same key %r\" % key)\n nodes_by_key[key] = node\n node_depended_on_by[key] = set()\n\n for node in nodes:\n dep_keys = get_dependency_keys(node)\n for dep_key in dep_keys:\n if dep_key not in nodes_by_key:\n if can_ignore_dependency is None or not can_ignore_dependency(dep_key):\n raise ValueError(\"Dependency %r was not in the list of nodes %r\" % (dep_key, nodes))\n else:\n node_depended_on_by[dep_key].add(node)\n\n return toposort(nodes, lambda n: node_depended_on_by[get_node_key(n)])",
"def _sort_dependencies(self):\n def sort_hier(node):\n if node is None:\n return None\n task = self.get_task_by_mapper(node.item)\n if node.cycles is not None:\n tasks = []\n for n in node.cycles:\n tasks.append(self.get_task_by_mapper(n.item))\n task.circular = task._sort_circular_dependencies(self, tasks)\n for child in node.children:\n t = sort_hier(child)\n if t is not None:\n task.childtasks.append(t)\n return task\n \n mappers = self._get_noninheriting_mappers()\n head = DependencySorter(self.dependencies, list(mappers)).sort(allow_all_cycles=True)\n #print \"-------------------------\"\n #print str(head)\n #print \"---------------------------\"\n task = sort_hier(head)\n return task",
"def _sort_circular_dependencies(self, trans, cycles):\n allobjects = []\n for task in cycles:\n allobjects += [e.obj for e in task.get_elements(polymorphic=True)]\n tuples = []\n \n cycles = util.Set(cycles)\n \n #print \"BEGIN CIRC SORT-------\"\n #print \"PRE-CIRC:\"\n #print list(cycles)[0].dump()\n \n # dependency processors that arent part of the cyclical thing\n # get put here\n extradeplist = []\n \n # organizes a set of new UOWTasks that will be assembled into\n # the final tree, for the purposes of holding new UOWDependencyProcessors\n # which process small sub-sections of dependent parent/child operations\n dependencies = {}\n def get_dependency_task(obj, depprocessor):\n try:\n dp = dependencies[obj]\n except KeyError:\n dp = dependencies.setdefault(obj, {})\n try:\n l = dp[depprocessor]\n except KeyError:\n l = UOWTask(self.uowtransaction, depprocessor.targettask.mapper, circular_parent=self)\n dp[depprocessor] = l\n return l\n\n def dependency_in_cycles(dep):\n # TODO: make a simpler way to get at the \"root inheritance\" mapper\n proctask = trans.get_task_by_mapper(dep.processor.mapper.primary_mapper().base_mapper(), True)\n targettask = trans.get_task_by_mapper(dep.targettask.mapper.base_mapper(), True)\n return targettask in cycles and (proctask is not None and proctask in cycles)\n \n # organize all original UOWDependencyProcessors by their target task\n deps_by_targettask = {}\n for t in cycles:\n for task in t.polymorphic_tasks():\n for dep in task.dependencies:\n if not dependency_in_cycles(dep):\n extradeplist.append(dep)\n for t in dep.targettask.polymorphic_tasks():\n l = deps_by_targettask.setdefault(t, [])\n l.append(dep)\n\n object_to_original_task = {}\n \n for t in cycles:\n for task in t.polymorphic_tasks():\n for taskelement in task.get_elements(polymorphic=False):\n obj = taskelement.obj\n object_to_original_task[obj] = task\n #print \"OBJ\", repr(obj), \"TASK\", repr(task)\n \n for dep in deps_by_targettask.get(task, []):\n # is this dependency involved in one of the cycles ?\n #print \"DEP iterate\", dep.processor.key, dep.processor.parent, dep.processor.mapper\n if not dependency_in_cycles(dep):\n #print \"NOT IN CYCLE\"\n continue\n #print \"DEP\", dep.processor.key \n (processor, targettask) = (dep.processor, dep.targettask)\n isdelete = taskelement.isdelete\n \n # list of dependent objects from this object\n childlist = dep.get_object_dependencies(obj, trans, passive=True)\n if childlist is None:\n continue\n # the task corresponding to saving/deleting of those dependent objects\n childtask = trans.get_task_by_mapper(processor.mapper.primary_mapper())\n \n childlist = childlist.added_items() + childlist.unchanged_items() + childlist.deleted_items()\n \n for o in childlist:\n if o is None or not childtask.contains_object(o, polymorphic=True):\n continue\n #print \"parent/child\", obj, o\n whosdep = dep.whose_dependent_on_who(obj, o)\n #print \"WHOSEDEP\", dep.processor.key, dep.processor.direction, whosdep\n if whosdep is not None:\n tuples.append(whosdep)\n # create a UOWDependencyProcessor representing this pair of objects.\n # append it to a UOWTask\n if whosdep[0] is obj:\n get_dependency_task(whosdep[0], dep).append(whosdep[0], isdelete=isdelete)\n else:\n get_dependency_task(whosdep[0], dep).append(whosdep[1], isdelete=isdelete)\n else:\n get_dependency_task(obj, dep).append(obj, isdelete=isdelete)\n \n #print \"TUPLES\", tuples\n head = DependencySorter(tuples, allobjects).sort()\n if head is None:\n return None\n\n #print str(head)\n\n # create 
a tree of UOWTasks corresponding to the tree of object instances\n # created by the DependencySorter\n def make_task_tree(node, parenttask, nexttasks):\n #print \"MAKETASKTREE\", node.item, parenttask\n originating_task = object_to_original_task[node.item]\n t = nexttasks.get(originating_task, None)\n if t is None:\n t = UOWTask(self.uowtransaction, originating_task.mapper, circular_parent=self)\n nexttasks[originating_task] = t\n parenttask.append(None, listonly=False, isdelete=originating_task.objects[node.item].isdelete, childtask=t)\n t.append(node.item, originating_task.objects[node.item].listonly, isdelete=originating_task.objects[node.item].isdelete)\n \n if dependencies.has_key(node.item):\n for depprocessor, deptask in dependencies[node.item].iteritems():\n t.cyclical_dependencies.add(depprocessor.branch(deptask))\n nd = {}\n for n in node.children:\n t2 = make_task_tree(n, t, nd)\n return t\n\n # this is the new \"circular\" UOWTask which will execute in place of \"self\"\n t = UOWTask(self.uowtransaction, self.mapper, circular_parent=self)\n\n # stick the non-circular dependencies and child tasks onto the new\n # circular UOWTask\n [t.dependencies.add(d) for d in extradeplist]\n t.childtasks = self.childtasks\n make_task_tree(head, t, {})\n #print t.dump()\n return t",
"def depends_on(self, node):\n return sorted(self.__edge_map[node], key=node_key)",
"def dep_tree(self, root):\n \n graph = {}\n for key,extract in self.extracts.items():\n graph[key] = set(extract.get('depends',[]))\n \n def _recurse(node):\n l = set([node])\n for n in graph[node]:\n l = l | _recurse(n)\n \n return l\n \n return _recurse(root)",
"def getSortedCyclicDependencies(self):\n res = []\n if self.isCircular():\n res = [self.identifier if self.originalId is None else self.originalId]\n # fill node inputs\n nn = 0\n while nn < len(res):\n _node = res[nn]\n for _inputId in self.model.getNode(_node).ioEngine.inputs:\n input_node = self.model.getNode(_inputId)\n if not _inputId in res and input_node.isCircular():\n # check if node is in circle of _inputId\n if _node in input_node.getFullInputs():\n res.append(_inputId)\n nn += 1\n return res",
"def rootSetOrder (self):\n order = []\n nodes = set(self.__nodes)\n edge_map = {}\n for (d, srcs) in six.iteritems(self.__edgeMap):\n edge_map[d] = srcs.copy()\n while nodes:\n freeset = set()\n for n in nodes:\n if not (n in edge_map):\n freeset.add(n)\n if 0 == len(freeset):\n _log.error('dependency cycle in named components')\n return None\n order.append(freeset)\n nodes.difference_update(freeset)\n new_edge_map = {}\n for (d, srcs) in six.iteritems(edge_map):\n srcs.difference_update(freeset)\n if 0 != len(srcs):\n new_edge_map[d] = srcs\n edge_map = new_edge_map\n return order",
"def get_rdeps(deps):\n rdeps = set()\n current = set(deps)\n while current:\n rdeps |= current\n new = set()\n for dep in current:\n new |= set(deps_cache[dep])\n current = new\n return rdeps",
"def topological_sort(self):\n in_degree = {}\n for node in self.graph:\n in_degree[node] = 0\n\n for from_node in self.graph:\n for to_node in self.graph[from_node]:\n in_degree[to_node] += 1\n\n queue = deque()\n for node in in_degree:\n if in_degree[node] == 0:\n queue.appendleft(node)\n\n sorted_nodes = []\n while queue:\n independent_node = queue.pop()\n sorted_nodes.append(independent_node)\n for next_node in self.graph[independent_node]:\n in_degree[next_node] -= 1\n if in_degree[next_node] == 0:\n queue.appendleft(next_node)\n\n if len(sorted_nodes) == len(self.graph):\n return sorted_nodes\n else:\n raise ValueError('graph is not acyclic')",
"def dependencies(self):\n tree_to_heads = {}\n for tree in reversed(list(self.all_subtrees())):\n if len(tree):\n head = tree.head()\n assert head.span() in tree_to_heads\n tree_to_heads[tree.span()] = tree_to_heads[head.span()]\n\n for subtree in tree:\n subhead = tree_to_heads[subtree.span()]\n if subhead.span() != head.span():\n yield (head, subhead)\n else:\n tree_to_heads[tree.span()] = tree",
"def topological_sort(items, partial_order):\n\n def add_node(graph, node):\n \"\"\"Add a node to the graph if not already exists.\"\"\"\n if node not in graph:\n graph[node] = [0] # 0 = number of arcs coming into this node.\n\n def add_arc(graph, fromnode, tonode):\n \"\"\"Add an arc to a graph. Can create multiple arcs.\n The end nodes must already exist.\"\"\"\n graph[fromnode].append(tonode)\n # Update the count of incoming arcs in tonode.\n graph[tonode][0] += 1\n\n # step 1 - create a directed graph with an arc a->b for each input\n # pair (a,b).\n # The graph is represented by a dictionary. The dictionary contains\n # a pair item:list for each node in the graph. /item/ is the value\n # of the node. /list/'s 1st item is the count of incoming arcs, and\n # the rest are the destinations of the outgoing arcs. For example:\n # {'a':[0,'b','c'], 'b':[1], 'c':[1]}\n # represents the graph: c <-- a --> b\n # The graph may contain loops and multiple arcs.\n # Note that our representation does not contain reference loops to\n # cause GC problems even when the represented graph contains loops,\n # because we keep the node names rather than references to the nodes.\n graph = {}\n for v in items:\n add_node(graph, v)\n for a, b in partial_order:\n add_arc(graph, a, b)\n\n # Step 2 - find all roots (nodes with zero incoming arcs).\n roots = [node for (node, nodeinfo) in graph.items() if nodeinfo[0] == 0]\n\n # step 3 - repeatedly emit a root and remove it from the graph. Removing\n # a node may convert some of the node's direct children into roots.\n # Whenever that happens, we append the new roots to the list of\n # current roots.\n sorted_items = []\n while len(roots) != 0:\n # If len(roots) is always 1 when we get here, it means that\n # the input describes a complete ordering and there is only\n # one possible output.\n # When len(roots) > 1, we can choose any root to send to the\n # output; this freedom represents the multiple complete orderings\n # that satisfy the input restrictions. We arbitrarily take one of\n # the roots using pop(). Note that for the algorithm to be efficient,\n # this operation must be done in O(1) time.\n root = roots.pop()\n sorted_items.append(root)\n for child in graph[root][1:]:\n graph[child][0] = graph[child][0] - 1\n if graph[child][0] == 0:\n roots.append(child)\n del graph[root]\n if len(graph.items()) != 0:\n # There is a loop in the input.\n return None\n return sorted_items",
"def get_dependencies(self, target, graph, dep_list):\n \n if graph == OrderedDict(): return\n if target in graph:\n dep_list.append(graph)\n return dep_list\n for key in graph:\n self.get_dependencies(target, graph[key], dep_list)\n return dep_list",
"def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')",
"def deps_for(nodes, key):\n\n def _deps(key, path):\n if key not in nodes:\n return [key]\n\n if key in path:\n msg = \"Cycle detected between {} and {}\".format(\n path[0], path[-1])\n raise GraphError(msg)\n\n deps = nodes[key][\"required\"]\n trans = [_deps(dep, path + [key]) for dep in deps]\n return set(util.concat(deps, *trans))\n\n return _deps(key, [])",
"def ordered_descendants(\n self: InheritingObjectT, schema: s_schema.Schema\n ) -> List[InheritingObjectT]:\n graph = {}\n for descendant in self.descendants(schema):\n graph[descendant] = topological.DepGraphEntry(\n item=descendant,\n deps=ordered.OrderedSet(\n descendant.get_bases(schema).objects(schema),\n ),\n extra=False,\n )\n\n return list(topological.sort(graph, allow_unresolved=True))",
"def unique_deps(deps):\n deps.sort()\n return list(k for k, _ in itertools.groupby(deps))",
"def get_dependencies(graph: Graph, node: Node):\n dependencies: Set[Node] = set()\n def traverse_nodes(nodes):\n for candidate in nodes:\n if candidate not in dependencies:\n dependencies.add(candidate)\n traverse_nodes(graph[candidate])\n traverse_nodes(graph[node])\n dependencies.discard(node)\n return dependencies",
"def get_dependencies(self, *, _memo: Optional[Dict[\"EONode\", Set[\"EONode\"]]] = None) -> Set[\"EONode\"]:\n _memo = _memo if _memo is not None else {}\n if self not in _memo:\n result = {self}.union(*(input_node.get_dependencies(_memo=_memo) for input_node in self.inputs))\n _memo[self] = result\n\n return _memo[self]"
] |
[
"0.7946563",
"0.7501466",
"0.7435545",
"0.7102352",
"0.69701874",
"0.69135773",
"0.68873614",
"0.6655312",
"0.6642737",
"0.6591969",
"0.65904367",
"0.6499221",
"0.6433072",
"0.64172924",
"0.63997835",
"0.63847566",
"0.63627267",
"0.6321794",
"0.62746567",
"0.6258455",
"0.62512076",
"0.62507874",
"0.6236329",
"0.6234727",
"0.620894",
"0.61311924",
"0.6129433",
"0.61175877",
"0.61091876",
"0.6078364"
] |
0.77973616
|
1
|
[if cond form] ([else form])
|
def special_if(self, form):
testforms = [form[1:]]
elseform = None
startIndex = None
parent = form.up()
for i in range(len(parent)):
x = parent[i]
if x is form:
startIndex = i
if startIndex is None:
raise RuntimeError("Bad")
# find following forms that begin with `elif' and `else'. We
# break on anything else. Accumulate number of forms to delete.
index = startIndex + 1
while index < len(parent):
f = parent[index]
if isinstance(f, Form) and len(f) and isinstance(f[0], Identifier):
if f[0].name == 'elif':
testforms.append(f[1:])
f.insert(0, Ignore)
elif f[0].name == 'else':
elseform = f[1:]
f.insert(0, Ignore)
# there should be nothing after else
break
else:
# Anything other than elif or else, break
break
else:
# it doesn't look anything at all like an else or an elif form
break
index += 1
tests = [(self.reallyCompile(t[0]), self.compileSuite(t[1:])) for t in testforms]
else_ = elseform and self.compileSuite(elseform)
r = ast.If(tests, else_)
return r
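
The compiler method above leans on the surrounding Form/Identifier machinery, so it is not runnable on its own. The sketch below isolates only the sibling-scanning step it performs: starting at an if form, fold any immediately following elif forms and one trailing else form into (test, body) pairs plus an else body. Plain Python lists stand in for the compiler's Form/Identifier objects; that encoding is an illustrative assumption, not the original data model.

# An illustrative, self-contained sketch of the sibling-scanning logic:
# absorb following ['elif', ...] forms and one ['else', ...] form into
# (test, body) pairs plus an else-body, stopping at anything else.
def gather_if_chain(siblings, start):
    form = siblings[start]
    assert form[0] == 'if'
    tests = [(form[1], form[2:])]
    elsebody = None
    consumed = []
    index = start + 1
    while index < len(siblings):
        f = siblings[index]
        if isinstance(f, list) and f and f[0] == 'elif':
            tests.append((f[1], f[2:]))
            consumed.append(index)
        elif isinstance(f, list) and f and f[0] == 'else':
            elsebody = f[1:]
            consumed.append(index)
            break  # nothing may follow the else form
        else:
            break  # anything other than elif/else ends the chain
        index += 1
    return tests, elsebody, consumed


if __name__ == '__main__':
    body = [['if', 'a', 'x'], ['elif', 'b', 'y'], ['else', 'z'], ['print']]
    print(gather_if_chain(body, 0))
    # ([('a', ['x']), ('b', ['y'])], ['z'], [1, 2])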
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? \")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)",
"def conditional(self) -> global___Statement.Conditional:",
"def switch(cond, ift, iff):",
"def ifelse(test, if_true, if_false):\n if test:\n return if_true\n else:\n return if_false",
"def conditions():\n pass",
"def condition(self) -> global___Expression:",
"def condition(self) -> global___Expression:",
"def link_if_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.true_body)\n if stmt.false_body is not None:\n self.link_stmt(stmt.false_body)",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def decide():",
"def toggle(condition, if_true, if_false):\n return (if_true if condition else if_false)",
"def with_if_function():\n return if_function(c(), t(), f())",
"def _set_logical_op(self, condition, incr):\n c1 = [\"@SP\", \"A=M\", \"D=D-M\"]\n c2 = [\"@TRUE{i}\" .format(i=incr)]\n c3 = [\"D;{c}\".format(c=condition)]\n c4 = [\"(FALSE{i})\".format(i=incr)]\n c5 = self._set_stack(0)\n c6 = [\"@ACOND{i}\".format(i=incr)]\n c7 = [\"0;JMP\"]\n c8 = [\"(TRUE{i})\".format(i=incr)]\n c9 = self._set_stack(-1)\n c10 = [\"(ACOND{i})\".format(i=incr)]\n return c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 +c10",
"def condition_forward_checking(csp, var) :\n return False",
"def condition_forward_checking(csp, var) :\n return False",
"def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()",
"def cond_actions(clause):\n return cdr(clause)",
"def Require(condition):\n if not condition:\n Revert()\n return True",
"def on_true(self) -> global___Expression:",
"def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()",
"def cond2if(cond_exp):\n def expand_clauses(list_of_clauses): \n if isNull(list_of_clauses):\n return FALSE # 4-15\n first = first_clause(list_of_clauses)\n rest = rest_clauses(list_of_clauses)\n if isElseClause(first):\n if isNull(rest):\n return seq2exp(cond_actions(first)) \n else:\n raise ValueError(\"ELSE clause is not last -- cond2if\")\n else:\n return make_if(\n cond_predicate(first),\n seq2exp(cond_actions(first)), # make a single \"'begin\" expression\n expand_clauses(rest))\n return expand_clauses(cond_clauses(cond_exp)) # 4-15 changed exp to cond_exp",
"def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()",
"def fn_if(self, value):\n\n condition_name, true_value, false_value = value\n if self.parser.conditions.evaluate(condition_name):\n return true_value\n else:\n return false_value",
"def _ifelse(self):\n debug.show(\"ifelse:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 3:\n falseCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n trueCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"ifelse:True\")\n self.evaluate(trueCode)\n else:\n debug.show(\"ifelse:False\")\n self.evaluate(falseCode)\n else:\n debug.err(\"not enough items on the stack\")\n return None",
"def iff(bool,trueValue,falseValue):\n if bool:\n return trueValue\n else:\n return falseValue",
"def with_if_statement():\n if c():\n return t()\n else:\n return f()",
"def gen_condition(self, condition, yes_block, no_block):\n if isinstance(condition, expressions.BinaryOperator):\n if condition.op == \"||\":\n middle_block = self.builder.new_block()\n self.gen_condition(condition.a, yes_block, middle_block)\n self.builder.set_block(middle_block)\n self.gen_condition(condition.b, yes_block, no_block)\n elif condition.op == \"&&\":\n middle_block = self.builder.new_block()\n self.gen_condition(condition.a, middle_block, no_block)\n self.builder.set_block(middle_block)\n self.gen_condition(condition.b, yes_block, no_block)\n elif condition.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\"]:\n lhs = self.gen_expr(condition.a, rvalue=True)\n rhs = self.gen_expr(condition.b, rvalue=True)\n op_map = {\n \">\": \">\",\n \"<\": \"<\",\n \"==\": \"==\",\n \"!=\": \"!=\",\n \"<=\": \"<=\",\n \">=\": \">=\",\n }\n op = op_map[condition.op]\n self.emit(ir.CJump(lhs, op, rhs, yes_block, no_block))\n else:\n self.check_non_zero(condition, yes_block, no_block)\n elif isinstance(condition, expressions.UnaryOperator):\n if condition.op == \"!\":\n # Simply swap yes and no here!\n self.gen_condition(condition.a, no_block, yes_block)\n else:\n self.check_non_zero(condition, yes_block, no_block)\n else:\n self.check_non_zero(condition, yes_block, no_block)"
] |
[
"0.6044314",
"0.5981861",
"0.5975344",
"0.5948368",
"0.58330333",
"0.56861645",
"0.56861645",
"0.56803143",
"0.56476486",
"0.56476486",
"0.56476486",
"0.56476486",
"0.56255823",
"0.5613256",
"0.5593953",
"0.557144",
"0.5535713",
"0.5535713",
"0.54777485",
"0.54220575",
"0.54126686",
"0.54046845",
"0.54022855",
"0.53761876",
"0.53724784",
"0.53499454",
"0.53345734",
"0.53187126",
"0.53141236",
"0.53065044"
] |
0.6206543
|
0
|
Lambda function that runs on CloudFormation create. It populates the metrics definition table with information about each metric.
|
def lambda_handler(event, context):
ddb_client = boto3.resource('dynamodb')
table = ddb_client.Table(os.environ['METRICSDEF_TABLE'])
metrics_def = SettingsDefinition()
    # This gets a list of (name, function) tuples for the functions defined in SettingsDefinition.
function_list = inspect.getmembers(metrics_def, predicate=inspect.isfunction)
for item in function_list:
try:
table.put_item(Item = item[1]())
except Exception as e:
            _LOGGER.error('Unable to stick metric definition into DDB. {0}'.format(e))
send(event, context, FAILED)
return
send(event, context, SUCCESS)
return
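A hedged sketch of the SettingsDefinition shape the handler above relies on; the class body, method names, and item fields here are illustrative assumptions, not taken from the original code. Each function returns a dict that could be passed straight to table.put_item(Item=...):

import inspect

class SettingsDefinition:
    @staticmethod
    def recovery_time():
        # Hypothetical metric definition item; field names are illustrative.
        return {"metric_id": "recovery_time", "name": "Recovery Time", "unit": "seconds"}

    @staticmethod
    def uptime_percentage():
        return {"metric_id": "uptime_percentage", "name": "Uptime Percentage", "unit": "percent"}

# Mirrors the handler's enumeration: getmembers yields (name, function) tuples,
# and each function is called to build one DynamoDB item.
for _name, func in inspect.getmembers(SettingsDefinition(), predicate=inspect.isfunction):
    print(func())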
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recreate_metrics():\n all = monitor_client.list_metric_descriptors(\n project_path, filter_='metric.type=starts_with(\"custom.\")'\n )\n for a in all:\n if \"accumulator\" in str(a) or \"biquery\" in str(a):\n metric_name = monitor_client.metric_descriptor_path(\n settings.PROJECT_ID, a.type\n )\n\n try:\n monitor_client.delete_metric_descriptor(metric_name)\n except Exception as e:\n print(e)\n\n metric_descriptor = {\n \"type\": f\"custom.googleapis.com/{Monitoring.PING}\",\n \"labels\": [\n {\n \"key\": \"operation\",\n \"valueType\": \"STRING\",\n # \"description\": \"Performed operation name\"\n }\n ],\n \"metricKind\": \"GAUGE\",\n \"valueType\": \"DOUBLE\",\n \"unit\": \"items\",\n \"description\": \"Function performed in a loop with hard limit\",\n \"displayName\": \"Repeated Function Execution\",\n }\n\n return monitor_client.create_metric_descriptor(\n settings.PROJECT_ID, metric_descriptor\n )",
"def create_metric(self) -> EvalMetric:\n pass",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def metrics_add():\n\n # Check just basic schema, let elastic check everything else\n schema = {\n \"type\": \"array\",\n \"items\": {\"type\": \"object\"}\n }\n\n try:\n req_data = flask.request.get_json(silent=False, force=True)\n jsonschema.validate(req_data, schema)\n except (ValueError, jsonschema.exceptions.ValidationError) as e:\n return flask.jsonify({\"error\": \"Bad request: %s\" % e}), 400\n else:\n data = {\"north-south\": [], \"east-west\": []}\n for d in req_data:\n for key in data:\n if key in d:\n data[key].append(d[key])\n break\n else:\n LOG.warning(\"Ignoring wrong object %s\" % json.dumps(d))\n\n # TODO(boris-42): Use pusher here, to reduce amount of quires\n # from netmet server to elastic, join data from different netmet\n # clients requests before pushing them to elastic\n for k, v in data.iteritems():\n if v:\n db.get().metrics_add(k, v)\n\n return flask.jsonify({\"message\": \"successfully stored metrics\"}), 201",
"def get_metric_info(self):\n metric_data_object = self.client.get_metric_data(\n MetricDataQueries=[\n {\n \"Id\": \"cdbdata_invocations\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Invocations\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_errors\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Errors\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_throttles\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Throttles\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_concurrentexec\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"ConcurrentExecutions\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n }\n ],\n StartTime=self.start_timestamp,\n EndTime=self.end_timestamp,\n ScanBy='TimestampDescending'\n )\n\n metric_data_points = metric_data_object[DataPointsCollector.RESPONSE_KEY]\n\n return metric_data_points",
"def init_metric_definitions():\n metric_definitions = []\n\n # add info to list in memory, one by one, following signature values\n metric_def_ID = 1\n metric_def_name = \"Recovery Time\"\n metric_def_info = \"Measures time taken by ONAP to restore a VNF\"\n metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n metric_def_ID = 2\n metric_def_name = \"Uptime Percentage\"\n metric_def_info = \"Measures ratio of uptime to reference time, not counting planned downtime\"\n metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n\n # write list to binary file\n write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)\n\n return metric_definitions",
"def lambda_handler(event, context):\n get_other_metrics(event)",
"def _create_db(self, overwrite=False):\n current = list(self._cur.execute(\"select * from sqlite_master where type='table' and name='metrics'\"))\n if overwrite and len(current) >= 1:\n self._cur.execute('''DROP TABLE IF EXISTS metrics''')\n self._conn.commit()\n elif len(current) >= 1:\n self._fields = [x[1] for x in sorted(self._cur.execute('''PRAGMA table_info(metrics)'''))]\n return None\n self._cur.execute('''CREATE TABLE metrics (model_name text, operation_name text, metric_name text, metric_type text, metric_value real)''')\n self._fields = [\"model_name\", \"operation_name\", \"metric_name\", \"metric_type\", \"metric_value\"]\n self._conn.commit()",
"def test_create_derived_metric(self):\n pass",
"def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []",
"def initialize_metrics_json(metrics_filename):\n json_structure = ['dataset','region','metric','statistic']\n metrics = {\n 'SCHEMA': {\n 'name': 'CMEC',\n 'version': 'v1',\n 'package': 'ASoP'},\n 'DIMENSIONS':{\n 'json_structure': json_structure,\n 'dimensions': {\n 'dataset': {},\n 'region': {},\n 'metric': {\n 'Temporal intermittency': {},\n 'Spatial intermittency': {}},\n 'statistic': {\n 'p(upper|upper)': \"Probability of upper quartile precipitation followed by upper quartile precipitation\",\n 'p(lower|lower)': \"Probability of lower quartile precipitation followed by lower quartile precipitation\",\n 'p(upper|lower)': \"Probability of upper quartile precipitation followed by lower quartile precipitation\",\n 'p(lower|upper)': \"Probability of lower quartile precipitation followed by upper quartile precipitation\",\n 'combined': 'Metric of coherence (combined probabilities)'\n }}},\n 'RESULTS': {},\n 'REFERENCE': 'Klingaman et al. (2017, GMD, doi:10.5194/gmd-10-57-2017)'}\n with open(metrics_filename,'w') as fname:\n json.dump(metrics,fname,indent=2)\n\n return",
"def create_system_metrics(system):\n pass",
"def metrics_group():",
"def handler(event, context):\n try:\n # Retrieve environment variables\n dimension_name = getenv(\"CODEDEPLOY_DIMENSION_NAME\")\n metric_name = getenv(\"CODEDEPLOY_METRIC_NAME\")\n if not dimension_name or not metric_name:\n return \"CODEDEPLOY_DIMENSION_NAME or CODEDEPLOY_METRIC_NAME not set\"\n\n # Get deployment state from CodeDeploy event\n deployment_state = event[\"detail\"][\"state\"]\n print(f\"Deployment state: {deployment_state}\")\n\n # Pushing custom metric to CW\n response = boto3.client(\"cloudwatch\").put_metric_data(\n MetricData=[\n {\n \"MetricName\": metric_name,\n \"Dimensions\": [{\"Name\": dimension_name, \"Value\": deployment_state}],\n \"Unit\": \"None\",\n \"Value\": 1,\n \"Timestamp\": datetime.datetime.now(),\n },\n ],\n Namespace=\"CodeDeployDeploymentStates\",\n )\n print(f\"Response from CW service: {response}\")\n return response\n # pylint: disable=broad-except\n except Exception as excpt:\n print(f\"Execution failed... {excpt}\")\n return None",
"def lambda_handler(event, context):\n\n try:\n created_item = create_new_table_item(event)\n return {\"statusCode\": 201, \"body\": json.dumps(f\"{created_item}\")}\n\n except BlankRequestBody as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(MISSING_PARAMETERS_MESSAGE)}\n\n except ValidationError as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(INCORRECT_PARAMETERS_MESSAGE)}\n\n except Exception as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 500, \"body\": json.dumps(\"Internal server error\")}",
"def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))",
"def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def build_metrics(input_workbook, metrics_worksheet_name, topic_name_prefix):\n wb = openpyxl.load_workbook(input_workbook, data_only=True, read_only=True)\n ws = wb[metrics_worksheet_name]\n\n result = {}\n for row in ws.iter_rows(min_row=2):\n name = row[0].value\n if not name:\n break\n\n description = row[1].value\n address = row[2].value\n size = row[3].value\n scaling_factor = row[4].value\n data_type = DATA_TYPE_STR_TO_ENUM[row[5].value]\n topic_name = '{}/{}'.format(topic_name_prefix, name)\n result[name] = model.Metric(\n name, description, address, size, scaling_factor, data_type, topic_name)\n\n return result",
"def collect_metrics(params, start, uuid=str(uuid4())):\n list_of_metrics = ['AWS/EC2/CPUUtilization',\n 'CGCloud/MemUsage',\n 'CGCloud/DiskUsage_mnt_ephemeral',\n 'CGCloud/DiskUsage_root',\n 'AWS/EC2/NetworkIn',\n 'AWS/EC2/NetworkOut',\n 'AWS/EC2/DiskWriteOps',\n 'AWS/EC2/DiskReadOps']\n\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n\n while ids:\n # metrics = {metric: [] for metric in list_of_metrics}\n for instance_id in ids:\n for metric in list_of_metrics:\n averages = []\n try:\n s = start\n while s < stop:\n e = s + (4 * 24 * 3600)\n aws_start = datetime.utcfromtimestamp(s)\n aws_stop = datetime.utcfromtimestamp(e)\n met_object = get_metric(metric, instance_id, aws_start, aws_stop)\n averages.extend([x['Average'] for x in get_datapoints(met_object)])\n s = e\n if averages:\n metrics[metric].append(averages)\n logging.info('# of Datapoints for metric {} is {}'.format(metric, len(metrics[metric][0])))\n except RuntimeError:\n if instance_id in instance_ids:\n instance_ids.remove(instance_id)\n # Remove metrics if no datapoints were collected\n metrics = dict((k, v) for k, v in metrics.iteritems() if v)\n # Save CSV of data\n mkdir_p('{}_{}'.format(uuid, str(datetime.utcnow()).split()[0]))\n for metric in metrics:\n with open('{}_{}/{}.csv'.format(uuid, str(datetime.utcnow()).split()[0], metric.rsplit('/', 1)[1]), 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(metrics[metric])",
"def compute_metrics(self):\n pass",
"def CreatePodMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _SnapMetrics(deadline):\n next_deadline = deadline + frequency_seconds\n callback = partial(_SnapMetrics, next_deadline)\n cls._timeouts[group_key] = IOLoop.current().add_timeout(next_deadline, callback)\n\n sample = meter.sample()\n sample_json = json.dumps(sample)\n new_metric = Metric.Create(group_key, machine_id, deadline, sample_json)\n with util.Barrier(_UploadSuccess, _UploadError) as b:\n retry.CallWithRetryAsync(retry_policy, new_metric.Update, client=client, callback=b.Callback())",
"def create_stats(measures: \"List of function timings.\",\n col: \"Current Column.\", row: \"Current Row\",\n scenario: \"Current Scenario.\") -> dict:\n return {\"scenario\": scenario,\n \"no_column\": col,\n \"data_length\": row,\n \"min\": np.min(measures),\n \"max\": np.max(measures),\n \"avg\": np.mean(measures),\n \"q50\": np.median(measures)}",
"def handler(event, context):\n\n status = cfnresponse.FAILED\n physical_resource_id = None\n response_data = {}\n try:\n\n # Start with the default parameters.\n response_data.update(event[\"ResourceProperties\"][\"DefaultParams\"])\n\n # Then override with any values from the secret.\n secret = secrets_client.get_secret_value(\n SecretId=event[\"ResourceProperties\"][\"SecretArn\"],\n VersionId=event[\"ResourceProperties\"][\"SecretVersionId\"],\n )\n secret_values = json.loads(secret[\"SecretString\"])\n response_data.update(secret_values)\n\n # Set the log stream prefix based on the image version or tag.\n if \":\" in response_data[\"IMAGE\"]:\n response_data[\"LOG_STREAM_PREFIX\"] = response_data[\"IMAGE\"].split(\":\")[-1]\n else:\n response_data[\"LOG_STREAM_PREFIX\"] = response_data[\"IMAGE\"].split(\"/\")[-1]\n\n # Use the existing desired count when updating an existing service,\n # because this value is managed by auto scaling. Otherwise,\n # start new services with the auto scaling minimum.\n if event[\"RequestType\"] == \"Update\":\n response_data[\"AUTOSCALING_DESIRED\"] = get_desired_count(\n cluster_name=event[\"ResourceProperties\"][\"ClusterName\"],\n service_name=event[\"ResourceProperties\"][\"ServiceName\"],\n )\n else:\n response_data[\"AUTOSCALING_DESIRED\"] = response_data[\"AUTOSCALING_MIN\"]\n\n status = cfnresponse.SUCCESS\n\n finally:\n cfnresponse.send(event, context, status, response_data, physical_resource_id)",
"def lambda_handler(event, context):\n for item in json.loads(event[\"Records\"][0][\"body\"]):\n item[\"id\"] = uuid.uuid1().bytes\n for key, value in item.items():\n if key == \"id\":\n item[key] = {\"B\": bytes(value)}\n elif key == \"fiscal_year\":\n item[key] = {\"N\": str(value)}\n elif key == \"emissions_mtco2e\":\n item[key] = {\"N\": str(value)}\n elif key == \"consumption\":\n item[key] = {\"N\": str(value)}\n else:\n item[key] = {\"S\": str(value)}\n\n time.sleep(0.001)\n\n dynamo.put_item(TableName=\"Greenhouse_gas_emissions\", Item=dict(item))",
"def __init__(self, metrics, gt, pred):\n self.dict_metrics = self.compute_metrics(metrics, gt, pred)",
"def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]",
"def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)"
] |
[
"0.62973464",
"0.6232039",
"0.58734256",
"0.5825737",
"0.57151",
"0.5661247",
"0.5646527",
"0.5569144",
"0.55343616",
"0.55289",
"0.5513814",
"0.5499376",
"0.54892343",
"0.54830444",
"0.54804415",
"0.54654485",
"0.5418523",
"0.54166085",
"0.5406935",
"0.5386698",
"0.53412855",
"0.53305024",
"0.53300464",
"0.5312858",
"0.5312071",
"0.529884",
"0.5294887",
"0.52704567",
"0.52630097",
"0.5258288"
] |
0.7163048
|
0
|
Get the Redis password from the configuration
|
def redis_pwd():
with open("/etc/redis/redis.conf") as fd:
secret_cfg = fd.read().splitlines()
for line in secret_cfg:
line = line.strip()
if line.startswith("requirepass"):
return line.split(" ")[1].strip()
return ''
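A hedged usage sketch: redis_pwd() only parses the config file, so the connection below assumes the third-party redis-py package and a local server, neither of which is part of the original snippet:

import redis  # third-party redis-py; an assumption, not part of the original code

# An empty string means no requirepass line was found, so pass None instead.
client = redis.Redis(host="localhost", port=6379, password=redis_pwd() or None)
client.ping()  # raises an error if the parsed password is wrong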
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_password(self) -> str:\n try:\n return self[\"password\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon password in system marathon config\"\n )",
"def password(self) -> str:\n return pulumi.get(self, \"password\")",
"def password(self) -> str:\n return pulumi.get(self, \"password\")",
"def password(self) -> str:\n return pulumi.get(self, \"password\")",
"def _get_password(self):\n return self._password",
"def password(self):\n return self.factory.server_password",
"def _get_password(self):\r\n return self._password",
"def get_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password')",
"def get_password(self):\n return self.__password",
"def password(self) -> str:\n return self.get_env_var(self.password_var)",
"def password(self) -> str:\n return self.get_env_var(self.password_var)",
"def password(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"password\")",
"def proxy_password(self) -> ConfigNodePropertyString:\n return self._proxy_password",
"def _password(self):\n if 'password' in self._config:\n return self._config['password']\n else:\n while True:\n password = self._UI.get_password(\"Please enter your trac password: \")\n password2 = self._UI.get_password(\"Please confirm your trac password: \")\n if password != password2:\n self._UI.show(\"Passwords do not agree.\")\n else: break\n if self._UI.confirm(\"Do you want your password to be stored on your local system? (your password will be stored in plaintext in a file only readable by you)\", default_yes=False):\n self._config['password'] = password\n self._config._write_config()\n return password",
"def get_password(self) -> str:\n return self._password",
"def GetPassword(self):\n return self._password",
"def settings_app_password(self):\n return self._settings_app_password",
"def password( self ):\n return self._password",
"def password(self):\n return self._password",
"def password(self):\n return self._password",
"def password(self):\n return self._password",
"def password(self):\n return self._password",
"def password(self) :\n\t\ttry :\n\t\t\treturn self._password\n\t\texcept Exception as e:\n\t\t\traise e",
"def getPassword(self):\n\t\treturn self.Password",
"def password(self):\n return self._password()",
"def __get_password(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_PASSWORD')",
"def password(self):\n return (self._config.get(\"sasl.password\")\n or self._config.get(\"sasl.oauthbearer.client.secret\"))",
"def __get_password(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_PASSWORD')",
"def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")",
"def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")"
] |
[
"0.7542461",
"0.7292464",
"0.7292464",
"0.7292464",
"0.716412",
"0.7142628",
"0.71423846",
"0.71384716",
"0.70613617",
"0.70174724",
"0.70174724",
"0.70080954",
"0.69805366",
"0.6949162",
"0.693224",
"0.69274074",
"0.689853",
"0.68978876",
"0.6888722",
"0.6888722",
"0.6888722",
"0.6888722",
"0.6881685",
"0.6879072",
"0.6876122",
"0.68508613",
"0.6838527",
"0.6807837",
"0.68051314",
"0.68051314"
] |
0.7801456
|
0
|
Checks if the current device of `device_mesh` supports DTensor's random APIs. Currently DTensor Random APIs only support cuda/cudalike devices. We suggest users call this API to test the availability before using our random APIs.
|
def is_rng_supported_mesh(device_mesh: DeviceMesh) -> bool:
device_handle = _get_device_handle(device_mesh.device_type)
if device_handle and hasattr(device_handle, "set_rng_state"):
return True
else:
warnings.warn(
f"DTensor random operators may not have complete support on {device_mesh.device_type} device mesh"
)
return False
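A hedged usage sketch: it assumes torch.distributed is already initialized, at least one CUDA device is visible, and DeviceMesh is importable from torch.distributed._tensor as in the PyTorch versions this code targets:

import torch
from torch.distributed._tensor import DeviceMesh

mesh = DeviceMesh("cuda", list(range(torch.cuda.device_count())))
if is_rng_supported_mesh(mesh):
    # Safe to rely on DTensor's coordinated random number generation.
    pass
else:
    # Fall back to plain per-rank torch RNG; DTensor random ops may not be coordinated.
    pass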
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gpu_availability():\n # assume if using tensorflow-gpu, then Nvidia GPU is available\n if is_built_with_cuda():\n return len(tf.config.list_physical_devices(\"GPU\")) > 0\n else:\n return False",
"def detect_available():\n global _CUDA_AVAILABLE\n if _CUDA_AVAILABLE is not None: return _CUDA_AVAILABLE\n _CUDA_AVAILABLE = shell.run('{} -c \"import torch;print(torch.cuda.is_available())\"'.format(sys.executable)).strip('\\n') == 'True'\n return _CUDA_AVAILABLE",
"def is_gpu_available() -> bool:\n return torch.cuda.is_available()",
"def is_gpu_available():\n ret = get_gpu_count() > 0\n if _HAS_PADDLE:\n import paddle\n if ret is True and not paddle.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n if _HAS_FLUID:\n from paddle import fluid\n if ret is True and not fluid.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n return ret",
"def check_cuda():\n if OS_VERSION[0] == \"Linux\":\n check_cuda_linux()\n elif OS_VERSION[0] == \"Windows\":\n check_cuda_windows()",
"def get_random(self) -> bool:\n return self._select_interface(self._rc_get_random,\n self._http_get_random)",
"def _valid_device(device):\n required_fields = ('name', 'type', 'group', 'canonical_name')\n if all(field in device for field in required_fields):\n return True\n return False",
"def is_cuda_device(device):\n\treturn 'cuda' in str(device)",
"def checkCUDAisAvailable():\n # some possible lib names \n libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')\n libsOk = True\n for libname in libnames:\n try:\n cuda = ctypes.CDLL(libname)\n except OSError:\n continue\n else:\n break\n else:\n libsOk = False\n return libsOk",
"def only_gpu(request):\n if request.node.get_closest_marker('gpu'):\n if 'device' in request.fixturenames:\n if not isinstance(request.getfixturevalue('device'),\n hoomd.device.GPU):\n pytest.skip('Test is run only on GPU(s).')\n else:\n raise ValueError('only_gpu requires the *device* fixture')",
"def hasaccelerator():\n\n return torch.cuda.is_available() or torch.backends.mps.is_available() or bool(Models.finddevice())",
"def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )",
"def is_mesh(node):\n try:\n mesh = attach_mesh(node)\n except TypeError:\n return False\n else:\n return True",
"def has_metal(self):\n if self.metal_indices:\n return True\n return False",
"def is_gpu_device(self, device):\n return device in self._gpu_devices",
"def is_resource_node(self):\n return self.camera is not None or self.mesh is not None",
"def _minimal_device_test(device: torch.device) -> bool:\n try:\n with torch.no_grad():\n model = torch.nn.Conv2d(1, 1, 1).to(device)\n x = torch.zeros(1, 1, 1, 1).to(device)\n y = model(x)\n del model, x, y\n except Exception as e:\n return False\n\n return True",
"def is_valid_mesh(cexp, mesh, mod):\n is_valid = True\n log_gui.debug(\"is_valid_mesh %s / %s\", mesh, mod)\n if not mesh:\n mess = \"A mesh is required\"\n mod.launch(aster_s_gui.ERROR, mess)\n is_valid = False\n return is_valid\n if not cexp.give(\"pressure\").find_groups(mesh):\n mess = \"At least a group without nodes need to be defined \" \\\n \"on the selected object\"\n mod.launch(aster_s_gui.ERROR, mess)\n is_valid = False\n return is_valid",
"def validate(self, mesh, mod):\n return is_valid_mesh(self, mesh, mod)",
"def validate(self, mesh, mod):\n return is_valid_mesh(self, mesh, mod)",
"def _check_device(self, inputs):\n for i, input in enumerate(inputs):\n if input._device != self._device:\n raise RuntimeError(\n 'Mismatched device between function and '\n 'element {} of input tensors. ({} vs. {})'\n .format(i, self._device, input._device))",
"def recognize_device(self, device):\n return False",
"def use_cuda():\n return torch.cuda.is_available() and os.getenv('AICROWD_CUDA', True)",
"def _on_gpu(self) -> bool:\n return self._current_device_index != CPU_INDEX",
"async def test_device_not_accessible(hass):\n with patch.object(axis.device, \"get_device\", side_effect=axis.errors.CannotConnect):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}",
"def _gpu_and_random(self, exprs):\n if not GPU:\n return False\n if not all(tell_deterministic(i) for i in exprs):\n return True\n\n return False",
"def get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return [0]\n\n FNULL = open(os.devnull, 'w')\n\n available_devices = []\n for i in range(num_devices):\n try:\n if b\"NVIDIA\" in subprocess.check_output(\n [\"{}/test_device\".format(executable_path),\n str(i)], stderr=FNULL):\n available_devices.append(i)\n logging.info('Device {} is available for rendering'.format(i))\n except subprocess.CalledProcessError as e:\n logging.info(e)\n logging.info('Device {} is not available for rendering'.format(i))\n FNULL.close()\n\n return available_devices",
"def get_test_devices():\n\n # Assumption: CPU is always available\n devices = ['cpu']\n\n if torch.cuda.is_available():\n devices.append('cuda')\n\n return devices",
"def test_unique_device(self):\n\n mode = \"unique_host_unique_device\"\n host_id_devices = utils.host_id_devices_for_rng(mode)\n specialize_func = jax.pmap(functools.partial(\n utils.specialize_rng_host_device, axis_name=\"i\",\n mode=mode), axis_name=\"i\")\n\n rng = specialize_func(self.rng, host_id_devices)\n\n self.assertEqual(\n np.unique(rng, axis=0).shape[0], jax.local_device_count())",
"def is_cuda(model):\n\treturn next(model.parameters()).is_cuda"
] |
[
"0.584912",
"0.58147293",
"0.57266724",
"0.55685794",
"0.5564945",
"0.54525566",
"0.5374313",
"0.53115326",
"0.52874994",
"0.52724636",
"0.5206334",
"0.51691324",
"0.5138846",
"0.5133226",
"0.5077793",
"0.506437",
"0.50338316",
"0.50119895",
"0.5010054",
"0.5010054",
"0.50036323",
"0.5002076",
"0.49968514",
"0.49882686",
"0.49807653",
"0.49780166",
"0.49758375",
"0.49745914",
"0.4953473",
"0.49465182"
] |
0.8336198
|
0
|
Set the starting RNG offset for the current device's local shard before actual op execution. The pre_op_offset value should start from the current RNG offset and increment by the size of the local shard until it reaches the size of the whole DTensor. For different ranks that hold the same DTensor shard, their pre_op_offset will be the same.
|
def _set_pre_op_offset(self, spec: DTensorSpec) -> None:
dtensor_shape = spec.shape
mesh = spec.mesh
dim_map = spec.dim_map
# Compute shard coordinate:
# The coordinate on each tensor dim is a tuple (idx, range)
# If a DTensor is partitioned on its dim i into n shards, and the current rank
# holds the j-th, then its shard coordinate will be (idx=j, range=n) on dim i
coordinate = mesh.get_coordinate()
assert coordinate is not None
shard_coord = [
coordinate[mesh_dim] if mesh_dim >= 0 else 0 for mesh_dim in dim_map
]
shard_size = [
mesh.size(mesh_dim) if mesh_dim >= 0 else 1 for mesh_dim in dim_map
]
# compute shard linear index
shard_linear_idx = self._calc_shard_linear_idx(shard_coord, shard_size)
# compute starting offset using the first shard's size
local_size_on_rank_0 = list(dtensor_shape)
for idx, placement in enumerate(spec.placements):
if isinstance(placement, Shard):
mesh_dim_size = mesh.size(idx)
shard_dim = placement.dim
local_size_on_rank_0[shard_dim] = placement._local_shard_size_on_dim(
dtensor_shape[shard_dim],
mesh_dim_size,
0,
return_offset=False,
)[0]
from torch.distributed._tensor.ops.utils import prod
local_size = prod(local_size_on_rank_0)
# get current RNG offset
current_offset = self.get_offset("parallel-rng")
# pytorch: offset must be multiple of 4
# source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp
offset_incr = (shard_linear_idx * local_size + 3) // 4 * 4
self.set_offset("parallel-rng", current_offset + offset_incr)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _set_post_op_offset(self, spec: DTensorSpec, old_offset: int) -> None:\n dtensor_shape = spec.shape\n\n from torch.distributed._tensor.ops.utils import prod\n\n numel = prod(dtensor_shape)\n # pytorch: offset must be multiple of 4\n # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp\n numel = (numel + 3) // 4 * 4\n self.set_offset(\"parallel-rng\", old_offset + numel)",
"def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0",
"def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0",
"def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0",
"def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')",
"def _local_pre_load_state_dict_hook(\n self,\n state_dict: Dict[str, Any],\n prefix: str,\n ) -> None:\n _replace_by_prefix(state_dict, prefix, f\"{prefix}{FSDP_WRAPPED_MODULE}.\")\n fqn = f\"{prefix}{FSDP_WRAPPED_MODULE}.{FLAT_PARAM}\"\n if fqn not in state_dict:\n assert getattr(self._fsdp_wrapped_module, FLAT_PARAM, None) is None, (\n \"No flat parameter in state_dict but self._fsdp_wrapped_module.flat_param is not None\"\n )\n return\n load_tensor = state_dict[fqn]\n assert isinstance(\n load_tensor, ShardedTensor\n ), \"Tensors in local_state_dict should be ShardedTensor.\"\n\n # Convert the ShardedTensor to a Tensor.\n shards = load_tensor.local_shards()\n assert len(shards), \"load_local_state_dict assume one shard per ShardedTensor.\"\n load_tensor = cast(torch.Tensor, shards[0].tensor)\n\n # Get the metada of the flat_param to decide whether to pad the loaded\n # tensor.\n flat_param = self._fsdp_wrapped_module.flat_param\n assert flat_param is not None\n if flat_param._shard_numel_padded not in (0, flat_param.numel()):\n assert load_tensor.numel() < flat_param.numel(), (\n f\"Local shard size = {flat_param.numel()} and the tensor in \"\n f\"the state_dict is {load_tensor.numel()}.\"\n )\n load_tensor = F.pad(load_tensor, [0, flat_param._shard_numel_padded])\n state_dict[fqn] = load_tensor",
"def set_initial_offset(self, offset):\n self.initial_offset = max(\n min(\n (len(self) + 0.5) * self.item_heights - self.my_surface.get_height(),\n offset\n ),\n 0\n )",
"def prestep(\n self,\n replica_id: tf.Tensor,\n replicas: np.ndarray,\n additional_states: FlowFieldMap,\n ) -> None:\n del replica_id, replicas\n # Parse additional states to extract external source/forcing terms.\n self._source.update(\n self._src_manager.update_helper_variable_from_additional_states(\n additional_states))",
"def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))",
"def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n logging.info('Initializing parameters from scratch.')\n\n # If pretraining and no checkpoint imported, we jit the (sharded-) init\n # function to minimize fragmentation. We use the same partition\n # setup as the training step/loop to initialize everything \"in-place\" and\n # avoid communication or OOM.\n p_initialize_train_state_fn = self._partitioner.partition(\n self._initialize_train_state,\n in_axis_resources=None,\n out_axis_resources=self.train_state_axes)\n return p_initialize_train_state_fn(init_rng)",
"def in_place_offset(self, offset):\n self.p += offset * self.cross_z.normalized()",
"def set_auto_dc_offset(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_auto_dc_offset(self, *args, **kwargs)",
"def apply_random_offset(patch, min_offset, max_offset):\n # Choose offset uniformly in log space.\n offset = tensorflow.pow(\n tensorflow.constant([10.0]),\n tensorflow.random_uniform([1], numpy.log10(min_offset), numpy.log10(max_offset)))\n return tensorflow.add(patch, offset)",
"def _sharded_pre_load_state_dict_hook(\n self,\n state_dict: Dict[str, Any],\n prefix: str,\n ) -> None:\n _replace_by_prefix(state_dict, prefix, prefix + f\"{FSDP_WRAPPED_MODULE}.\")\n if not self._fsdp_wrapped_module.has_params:\n return\n\n if not self._fsdp_wrapped_module.handle.uses_sharded_strategy:\n raise RuntimeError(\n \"load_sharded_state_dict can only be called when parameters \"\n \"are flatten and sharded.\"\n )\n\n nonsharded_tensors = []\n # TODO: Reduce the communication by using only one _all_gather_base to\n # gather all the parameters in this layer. This can be achieved by\n # concatenated all the local shards and then append the padding.\n # https://github.com/pytorch/pytorch/issues/77461\n for (param_name, _, module_name) in self._fsdp_wrapped_module.handle.flat_param._param_infos:\n module_name = self._convert_to_wrapped_module_name(module_name)\n fqn = f\"{prefix}{FSDP_WRAPPED_MODULE}.{module_name}{param_name}\"\n param = state_dict.pop(fqn)\n\n # All-gather the param (ShardedTensor)\n param, shards = _ext_pre_load_state_dict_transform(param)\n assert len(shards) < 2, (\n f\"Expects 0 or 1 shard per rank but got {len(shards)} shards on rank {self.rank}\"\n )\n param_numel = param.size().numel()\n dim_0_size = param.size()[0]\n chunk_size = (\n math.ceil(dim_0_size / self.world_size) * param_numel // dim_0_size\n )\n if shards:\n local_tensor = cast(torch.Tensor, shards[0].tensor).flatten()\n if not local_tensor.is_cuda:\n local_tensor = local_tensor.cuda()\n num_padding = chunk_size - local_tensor.numel()\n if num_padding > 0:\n local_tensor = F.pad(local_tensor, [0, num_padding])\n else:\n local_tensor = torch.zeros(chunk_size, dtype=param.dtype).cuda()\n tensor = torch.empty(\n chunk_size * self.world_size, dtype=local_tensor.dtype\n ).cuda()\n dist._all_gather_base(tensor, local_tensor, group=self.process_group)\n tensor = tensor.narrow(0, 0, param_numel).reshape(param.size())\n nonsharded_tensors.append(tensor)\n\n # Create a new flat_param from the loaded, non-sharded tensors.\n flat_param = self._fsdp_wrapped_module.flat_param\n loaded_flat_param = FlatParamHandle.flatten_params(nonsharded_tensors, requires_grad=False)\n\n # Get the chunk from the loaded flat_param for the local rank.\n loaded_flat_param, num_to_pad = FlatParamHandle._get_shard(\n loaded_flat_param, self.rank, self.world_size,\n )\n loaded_flat_param.to(flat_param.device)\n assert flat_param.numel() == loaded_flat_param.numel(), (\n f\"The loaded local chunk has different numel({flat_param.numel()}) \"\n f\"from the local chunk {flat_param.numel()}.\"\n )\n assert flat_param._shard_numel_padded == num_to_pad, (\n f\"The loaded local chunk has different padding({num_to_pad}) \"\n f\"from the local chunk {flat_param._shard_numel_padded}.\"\n )\n state_dict[f\"{prefix}_fsdp_wrapped_module.flat_param\"] = loaded_flat_param",
"def pre_randomize(self, seed):\n super(ReseedingRandomizer, self).pre_randomize(seed)\n self.seed(seed=seed)",
"def inc_ring_setting(self):\n self._rng_offset = self._change_offset(self._rng_offset, 1)",
"def incFirstRepOffset(self, inc=1):\n self.firstRepOffset += inc\n self.setLastRepOffset()",
"def batch_start(self, batch_idx, batch_data):\n self.batch = batch_idx",
"def pre_step(self):\n\n self.reward = 0",
"def position_before(self, op):\n if isinstance(op, FuncArg):\n raise error.PositioningError(\n \"Cannot place builder before function argument\")\n self._curblock = op.block\n self._lastop = op._prev",
"def set_auto_dc_offset(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_auto_dc_offset(self, *args, **kwargs)",
"def pre(self, pre):\n\n self._pre = pre",
"def step(self):\n\n if self._is_training:\n self._pos = self.training_pos.item()\n\n if self._params.dataset_config == DatasetConfig.TRAIN_ONLY:\n sample_range = len(self._train_label_indexes)\n self._is_training = True\n elif self._params.dataset_config == DatasetConfig.TEST_ONLY:\n sample_range = len(self._test_label_indexes)\n self._is_training = False\n else:\n total_dataset_size = len(self._train_label_indexes) + len(self._test_label_indexes)\n self._is_training = self._presented % total_dataset_size < len(self._train_label_indexes)\n switch_from_training_to_testing = self._presented % total_dataset_size == len(self._train_label_indexes)\n switch_from_testing_to_training = self._presented % total_dataset_size == 0\n if switch_from_training_to_testing or switch_from_testing_to_training:\n self._pos = -1 # needed for sequential order\n if self._is_training:\n sample_range = len(self._train_label_indexes)\n else:\n sample_range = len(self._test_label_indexes)\n\n self._presented += 1\n\n if self._params.random_order:\n self._pos = self._random.randint(low=0, high=sample_range)\n if self._is_location_filtering():\n self._pos = self._filter_location_random_position(self._is_training)\n else: # sequential order\n self._pos = (self._pos + 1) % sample_range\n if self._is_location_filtering():\n skipped_beginning, self._skip_next_step, self._pos = self._filter_location_sequential(self._is_training)\n self._presented += skipped_beginning\n\n self._copy_to_outputs_from(self._pos, self._is_training)\n\n self._pos += self._skip_next_step\n self._presented += self._skip_next_step\n self._skip_next_step = 0\n\n # write the training pos to tensor\n if self._is_training:\n self.training_pos[0] = self._pos",
"def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)",
"def run_pretraining(self):\n if self.is_main_process:\n logging.info(\"*********************************\")\n logging.info(\"*** Starting pre-training ***\")\n logging.info(\"*********************************\")\n logging.info(\"Training on GPU: %s\", torch.cuda.get_device_name(0))\n logging.info(\"Target batch size: %s\", self.target_batch_size)\n logging.info(\"Number of accumulation steps: %s\", self.num_accumulation_steps)\n logging.info(\"Actual batch size: %s\", self.batch_size)\n\n self.model.train()\n self.most_recent_ckpts_paths = []\n average_loss = 0.0 # averaged loss every self.log_freq steps\n epoch = 0\n training_steps = 0\n pool = ProcessPoolExecutor(1)\n if self.is_main_process:\n tensorboard_log_fpath = os.path.join(\n WORKDIR,\n '.tensorboard_logs',\n self.tensorboard_id,\n self.start_datetime.strftime(\"%d-%m-%Y_%H-%M-%S\")\n )\n logging.info(\n \"Writing TensorBoard logs in: %s\",\n tensorboard_log_fpath.replace(WORKDIR, '$WORKDIR'))\n self.tensorboard_writer = SummaryWriter(tensorboard_log_fpath)\n\n # NOTE: Infinite loop over epochs, termination is handled via iteration count\n while True:\n\n # If beginning of pre-training: read files from hdf5_directory and shuffle\n if (not self.resume_pretraining) or (epoch > 0) \\\n or (self.phase2 and self.global_step < 1) or self.init_checkpoint:\n files = []\n for fname in os.listdir(self.hdf5_directory):\n fpath = os.path.join(self.hdf5_directory, fname)\n if os.path.isfile(fpath) and fname.startswith('training.') and fname.endswith('.hdf5'):\n files.append(fpath)\n f_start_id = 0\n files.sort()\n random.Random(self.random_seed + epoch).shuffle(files)\n # Else: get id of next file\n else:\n f_start_id = self.checkpoint['files'][0]\n files = self.checkpoint['files'][1:]\n self.resume_pretraining = False\n num_files = len(files)\n\n # Get the current process hdf5 file\n # and handle case where there are more processes than files left:\n if \\\n torch.distributed.is_initialized() \\\n and torch.distributed.get_world_size() > num_files:\n\n remainder = torch.distributed.get_world_size() % num_files\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_start_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n\n # Set previous_file variable for next iteration\n previous_file = hdf5_fpath\n\n # Load the pre-training data from the .hdf5 file\n pretraining_data = PretrainingDataset(\n hdf5_fpath=hdf5_fpath,\n max_masked_tokens_per_input=self.max_masked_tokens_per_input\n )\n train_sampler = RandomSampler(pretraining_data)\n train_dataloader = DataLoader(\n pretraining_data,\n sampler=train_sampler,\n batch_size=self.batch_size * self.n_gpu,\n num_workers=4, pin_memory=True\n )\n overflow_buf = None\n if self.allreduce_post_accumulation:\n overflow_buf = torch.cuda.IntTensor([0])\n\n # Loop over the rest of pre-training data files\n if len(files) == 1:\n f_start_id = -1\n for f_id in range(f_start_id + 1, len(files)):\n\n # Submit creation of next DataLoader\n if torch.distributed.get_world_size() > num_files:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n if self.is_main_process:\n logging.info(\n \"Local rank: %s | File n° %s: 
%s\",\n self.local_rank, f_id, os.path.basename(previous_file)\n )\n previous_file = hdf5_fpath\n dataset_future = pool.submit(\n create_pretraining_dataloader,\n hdf5_fpath,\n self.max_masked_tokens_per_input,\n self.batch_size * self.n_gpu,\n )\n\n # Iterate over batches (w/ progress bar for main process)\n training_batches = tqdm(\n train_dataloader,\n desc=\"Pre-training...\"\n ) if self.is_main_process else train_dataloader\n for batch in training_batches:\n training_steps += 1\n (\n input_ids,\n segment_ids,\n input_mask,\n masked_lm_labels,\n next_sentence_labels\n ) = [tensor.to(self.device) for tensor in batch]\n\n # Forward Pass\n model_output = self.model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n labels=masked_lm_labels,\n next_sentence_label=next_sentence_labels)\n loss = model_output['loss']\n if self.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n divisor = self.num_accumulation_steps\n if self.num_accumulation_steps > 1:\n if not self.allreduce_post_accumulation:\n # this division was merged into predivision\n loss = loss / self.num_accumulation_steps\n divisor = 1.0\n\n # Compute gradients\n if self.fp16:\n with amp.scale_loss(\n loss, self.optimizer,\n delay_overflow_check=self.allreduce_post_accumulation) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n average_loss += loss.item()\n\n # Take optimizer/scheduler step every (gradient_acc_steps) steps\n # This is the model parameter update:\n if training_steps % self.num_accumulation_steps == 0:\n self.lr_scheduler.step() # learning rate warmup\n self.take_optimizer_step(overflow_buf)\n\n # If reached max steps save everything and log final loss:\n if self.global_step >= self.total_steps:\n last_num_steps = int(\n training_steps / self.num_accumulation_steps\n ) % self.log_freq\n last_num_steps = self.log_freq if last_num_steps == 0 else last_num_steps\n average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()\n average_loss = average_loss / (last_num_steps * divisor)\n if torch.distributed.is_initialized():\n average_loss /= torch.distributed.get_world_size()\n torch.distributed.all_reduce(average_loss)\n if self.is_main_process:\n logging.info(\n \"Total Steps: %s | Final Loss = %.3f\",\n int(training_steps / self.num_accumulation_steps),\n average_loss.item()\n )\n self.tensorboard_writer.add_scalar(\n \"Avg. training loss\",\n average_loss.item(), global_step=self.global_step)\n\n # If at a logging step:\n elif training_steps % (self.log_freq * self.num_accumulation_steps) == 0:\n if self.is_main_process:\n logging_message = (\n f\"Global step: {self.global_step} | \"\n f\"Learning Rate: {self.optimizer.param_groups[0]['lr']:.2E} | \"\n f\"Step Loss: {loss.item() * self.num_accumulation_steps / divisor:.3f} | \"\n f\"Avg. Loss: {average_loss / (self.log_freq * divisor):.3f}\"\n )\n # Update the tqdm description\n training_batches.set_description(logging_message, refresh=True)\n # Log average training loss to TensorBoard:\n self.tensorboard_writer.add_scalar(\n \"Avg. 
training loss\",\n average_loss / (self.log_freq * divisor),\n global_step=self.global_step)\n average_loss = 0\n\n # If reached max steps at log step or reached checkpoint step:\n if \\\n self.global_step >= self.total_steps \\\n or training_steps % \\\n (self.checkpoint_interval * self.num_accumulation_steps) == 0:\n\n # Check if model has improved then save a checkpoint if so\n if self.do_validation:\n model_has_improved = self.run_validation()\n else:\n model_has_improved = True\n if self.is_main_process and model_has_improved:\n self.make_checkpoint(f_id, files)\n\n # End pre-training if reached max steps\n if self.global_step >= self.total_steps:\n del train_dataloader\n return # NOTE: breaks out of the training loop\n\n # Move to next file after using up all batches of current file\n del train_dataloader\n train_dataloader, hdf5_fpath = \\\n dataset_future.result(timeout=None)\n\n # Update epoch after going through all .hdf5 files\n epoch += 1",
"def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n self.state[p][\"sum\"] = torch.full_like(\n p,\n group[\"initial_accumulator_value\"],\n memory_format=torch.preserve_format,\n device=\"cpu\",\n ).to(p.device)",
"def _local_post_state_dict_hook(\n self,\n state_dict: Dict[str, Any],\n prefix: str,\n ) -> Dict[str, Any]:\n _replace_by_prefix(state_dict, f\"{prefix}{FSDP_WRAPPED_MODULE}.\", prefix)\n if not self._fsdp_wrapped_module.has_params:\n return state_dict\n\n # state_dict[f\"{prefix}{FLAT_PARAM}\"] exists and has the same tensor\n # value as the flat_param but it is a pure Tensor because\n # nn.Module.state_dict() will detach the parameter. Therefore, we need\n # to get flat_param from the FlattenParamsWrapper to get the metadata.\n flat_param = getattr(self._fsdp_wrapped_module, FLAT_PARAM, None)\n assert flat_param is not None\n # Construct a ShardedTensor from the flat_param.\n full_numel = flat_param._unpadded_unsharded_size.numel() # type: ignore[attr-defined]\n shard_offset = flat_param.numel() * self.rank\n valid_data_size = flat_param.numel() - flat_param._shard_numel_padded\n if valid_data_size > 0 and flat_param._shard_numel_padded > 0:\n flat_param = flat_param.narrow(0, 0, valid_data_size)\n local_shards = [\n Shard.from_tensor_and_offsets(flat_param, [shard_offset], self.rank)\n ]\n state_dict[f\"{prefix}{FLAT_PARAM}\"] = init_from_local_shards(\n local_shards, full_numel, process_group=self.process_group\n ) # type: ignore[assignment]\n\n return state_dict",
"def post_randomize(self):\n super(BatchRandomizer, self).post_randomize()\n self.batch_idx += 1",
"def pre_forward(self, *args, **kwargs):\n batch_size = args[0].shape[0]\n seq_len = args[0].shape[-2]\n if not self.instantiated:\n self.hidden_dim = args[0].shape[-1]\n self.instantiate(hidden_dim=self.hidden_dim)\n if self.past_key_reparam is None:\n past_key = self.past_key\n else:\n past_key = self.past_key_reparam\n if self.past_value_reparam is None:\n past_value = self.past_value\n else:\n past_value = self.past_value_reparam\n\n\n def expand_batchsize(x):\n x = x.reshape(self.prefix_token_num, self.num_heads, -1).transpose(0,1)\n x = x.unsqueeze(0).expand(batch_size, *x.shape)\n return x\n\n if 'position_bias' in kwargs and kwargs['position_bias'] is not None:\n if kwargs['position_bias'].shape[-1] != seq_len + self.prefix_token_num: # Then the position_bias should be re-calculated\n kwargs['position_bias'] = None\n if kwargs['past_key_value'] is None:\n kwargs['past_key_value'] = (expand_batchsize(past_key), expand_batchsize(past_value))\n\n past_key_len = kwargs['past_key_value'][0].shape[-2]\n\n if 'mask' in kwargs and kwargs['mask'] is not None:\n mask_len = kwargs['mask'].shape[-1]\n if past_key_len + seq_len == mask_len + self.prefix_token_num:\n\n am = kwargs['mask'] # Should check the format of the attention_mask when moving to a new plm.\n kwargs['mask'] = torch.cat([-torch.zeros((*am.shape[:-1],self.prefix_token_num), dtype = am.dtype,device=am.device), am], dim=-1)\n return args, kwargs",
"def _epoch_before_hook(self):\n self._train_steps_this_epoch = 0"
] |
[
"0.6240967",
"0.5669493",
"0.5669493",
"0.5669493",
"0.54019994",
"0.53623265",
"0.5219316",
"0.521635",
"0.5167405",
"0.50177777",
"0.49872008",
"0.4977033",
"0.49528822",
"0.49195656",
"0.49027094",
"0.48578644",
"0.4843479",
"0.48360705",
"0.48027",
"0.47935167",
"0.47832376",
"0.4773301",
"0.47335756",
"0.47281262",
"0.4682614",
"0.46669522",
"0.46663636",
"0.46345958",
"0.46208966",
"0.4616774"
] |
0.8119143
|
0
|
Create a mapping from binomial terms to abbreviated binomials.
|
def abbreviate_binomials(binomials: list[dict], single_expanded_name=True):
abbrevs = defaultdict(set)
for term in binomials:
pattern = term["pattern"]
abbrev = abbreviate(pattern)
abbrevs[abbrev].add(pattern.split()[0])
if single_expanded_name:
abbrevs = {k: v.pop().title() for k, v in abbrevs.items() if len(v) == 1}
return abbrevs
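A hedged usage sketch: the abbreviate() helper is not shown, so the exact abbreviated keys below are an assumption (here taken to shorten the genus to its initial):

binomials = [{"pattern": "homo sapiens"}, {"pattern": "homo habilis"}]
# Expected shape with single_expanded_name=True: something like
# {"h. sapiens": "Homo", "h. habilis": "Homo"}; an abbreviation shared by
# two different genera would be dropped from the result instead.
print(abbreviate_binomials(binomials))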
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def word2vec_mapping_func():\n return {\"belonging to\": \"belonging\", \"parked on\": \"parked\", \"growing on\": \"growing\", \"standing on\": \"standing\",\n \"made of\": \"made\", \"attached to\": \"attached\", \"hanging from\": \"hanging\", \"in front of\": \"front\",\n \"lying on\": \"lying\", \"flying in\": \"flying\", \"looking at\": \"looking\", \"on back of\": \"back\",\n \"laying on\": \"laying\", \"walking on\": \"walking\", \"walking in\": \"walking\", \"sitting on\": \"sitting\",\n \"covered in\": \"covered\", \"part of\": \"part\", \"painted on\": \"painted\", \"mounted on\": \"mounted\"}",
"def _binary(self, trajectories):\n # Get the mapping between indices and binary genotypes\n index2binary = self._gpm.map(\"indices\", \"binary.genotypes\")\n\n # New dictionary\n mapping = OrderedDict()\n\n # Iterate through trajectories and convert keys to binary repr.\n for key in trajectories:\n indices = list(key)\n sequences = tuple([index2binary[i] for i in indices])\n mapping[sequences] = trajectories[key]\n\n _mapping = self.sort_dict(mapping)\n return _mapping",
"def nominal_map(options):\n pass",
"def _map_B(self, obs_seq):\n B_map = np.ones((self.n_states, len(obs_seq)))\n\n for j in range(self.n_states):\n for t, obs in enumerate(obs_seq):\n for i, symbol in enumerate(obs):\n if symbol == self.MISSING or (symbol is np.nan or symbol != symbol):\n # if the symbol is missing, use the maximum likelihood symbol for that state\n temp_symbol = np.argmax(\n self.B[i][j]\n )\n B_map[j][t] *= self.B[i][j][temp_symbol]\n else:\n B_map[j][t] *= self.B[i][j][symbol]\n return B_map",
"def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary",
"def get_whole_nato_alphabet_string(mappings):\n def tuple_to_string(letter_word_pair):\n \"\"\"Convert a tuple to a mapping string.\"\"\"\n letter, word = letter_word_pair\n return '{letter}: {word}'.format(letter=letter, word=word)\n\n items = mappings.items()\n sorted_items = sorted(mappings.items())\n return '\\n'.join(map(tuple_to_string, sorted_items))",
"def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, label in numbered_classes}\n new_labels = [new_mapping[numbered[0]] for numbered in numbered_classes]\n\n return new_labels, new_mapping",
"def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n dico['<UNK>'] = 10000000\n word_to_id, id_to_word = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words))\n )\n return dico, word_to_id, id_to_word",
"def word_map(text):\n\n # Replace puncation with words\n s = text.replace('.', \" :period:\")\n s = s.replace('\\n', \"\")\n s = s.replace('\"', \" :quote:\")\n s = s.replace(',', \" :comma:\")\n s = s.replace('?', \" :quest:\")\n\n words = sorted(set(s.split(\" \")))\n\n n_to_word = {}\n word_to_n = {}\n\n num = 0\n for word in words:\n n_to_word[num] = word\n word_to_n[word] = num\n num += 1\n\n return words, n_to_word, word_to_n",
"def build_inverse_barcode_map(seqs):\r\n inverse_map = {}\r\n map_count = defaultdict(int)\r\n for (label, seq) in seqs:\r\n (map_id, seq_id) = label.split()[:2]\r\n map_id = map_id.split(\"_\")[0]\r\n inverse_map[seq_id] = map_id\r\n map_count[map_id] += 1\r\n\r\n return (inverse_map, map_count)",
"def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))",
"def test_map_ids_to_taxonomy(self):\r\n p = BlastTaxonAssigner({})\r\n id_to_taxonomy_map = {\r\n \"AY800210\": \"Archaea;Euryarchaeota;Halobacteriales;uncultured\",\r\n \"EU883771\":\r\n \"Archaea;Euryarchaeota;Methanomicrobiales;Methanomicrobium et rel.\",\r\n \"EF503699\": \"Archaea;Crenarchaeota;uncultured;uncultured\",\r\n \"DQ260310\":\r\n \"Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium\",\r\n \"EF503697\": \"Archaea;Crenarchaeota;uncultured;uncultured\",\r\n }\r\n hits = {\r\n 's1': (\"AY800210\", 1e-99),\r\n 's5': (\"EU883771\", 'weird confidence value'),\r\n 's3': (\"DQ260310\", 42.),\r\n 's4': None,\r\n }\r\n expected = {\r\n 's1':\r\n (\"Archaea;Euryarchaeota;Halobacteriales;uncultured\",\r\n 1e-99, \"AY800210\"),\r\n 's5': ('Archaea;Euryarchaeota;Methanomicrobiales;Methanomicrobium et rel.',\r\n 'weird confidence value', \"EU883771\"),\r\n 's3':\r\n (\"Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium\",\r\n 42., \"DQ260310\"),\r\n 's4': ('No blast hit', None, None),\r\n }\r\n actual = p._map_ids_to_taxonomy(hits, id_to_taxonomy_map)\r\n self.assertEqual(actual, expected)",
"def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n\n dico['<PAD>'] = 10000001\n dico['<UNK>'] = 10000000\n dico = {k:v for k,v in dico.items() if v>=3}\n word_to_id, id_to_word = create_mapping(dico)\n\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words)\n ))\n return dico, word_to_id, id_to_word",
"def _create_unicode_map():\n unicode_map = {}\n\n for beta, uni in _map.BETACODE_MAP.items():\n # Include decomposed equivalent where necessary.\n norm = unicodedata.normalize('NFC', uni)\n unicode_map[norm] = beta\n unicode_map[uni] = beta\n\n # Add the final sigmas.\n final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA)\n unicode_map[final_sigma_norm] = 's'\n unicode_map[_FINAL_LC_SIGMA] = 's'\n\n return unicode_map",
"def _get_bin_map_of_number(number, length):\n empty_map = '0' * length\n bin_map_long = empty_map + str(bin(number))[2:]\n return bin_map_long[-length:]",
"def binary_transformation(sequence:str, binary_dict:dict):\r\n binary_sequence = \"\"\r\n for letter in sequence:\r\n binary_sequence += str(binary_dict[letter])\r\n return binary_sequence",
"def binary_probabilities(self, num_bits=None):\n n = self._num_bits if num_bits is None else num_bits\n return {format(key, \"b\").zfill(n): value for key, value in self.items()}",
"def create_mapped_bigrams(in_text):\r\n\tword_list = [] \r\n\r\n\tfor word in in_text: # iterate over words in corpus\r\n\t\tword_list.append(word.lower().strip()) # strip and lower all words in corpus\r\n\t\t\t\r\n\tword_bigramz = [' '.join(bigram) for bigram in nltk.bigrams(word_list)] # form bigrams\r\n\tprint(f'Total word bigrams in corpus: {len(word_bigramz)}')\r\n\r\n\t# create unique bigram dict of corpus\r\n\tbigram_dict = {}\r\n\r\n\tfor i, bigram in enumerate(word_bigramz):\r\n\r\n\t\tif not bigram in bigram_dict:# if bigram not already a dict key\r\n\t\t\tbigram_dict[bigram] = [] # make it a dict key\r\n\r\n\t\tbigram_dict[bigram].append(i) # add indices to dict key as value\r\n\r\n\r\n\tprint(f'Unique bigrams in corpus: {len(bigram_dict)}')\r\n\treturn bigram_dict",
"def prepare_labels(state_mapping, sequences):\n encoded_labels = [[state_mapping[state] for state in label] for label in sequences]\n \n depth = len(state_mapping)\n one_hot_labels = [[one_hot_encode(label, depth) for label in sequence] for sequence in encoded_labels]\n one_hot_labels = [np.asarray(ls) for ls in one_hot_labels]\n return one_hot_labels",
"def char_mapping(sentences, lower):\n chars = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(chars)\n dico[\"<PAD>\"] = 10000001\n dico['<UNK>'] = 10000000\n char_to_id, id_to_char = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in chars)\n ))\n return dico, char_to_id, id_to_char",
"def adem_2(a, b):\r\n if b == 0:\r\n return {(a,) : 1}\r\n if a == 0:\r\n return {(b,) : 1}\r\n if a >= 2*b:\r\n return {(a, b) : 1}\r\n result = {}\r\n for j in range(1 + a//2):\r\n if combinatorics.binomial_2(b-j-1, a-2*j) == 1:\r\n if j == 0:\r\n result[(a+b,)] = 1\r\n else:\r\n result[(a+b-j, j)] = 1\r\n return result",
"def pronoun_breakdown(pronoun_dict):\n first_person_singular = 0\n first_person_plural = 0\n second_person = 0\n third_person_singular = 0\n third_person_plural = 0\n total = 0\n\n for pronoun in pronoun_dict:\n if pronoun in first_person_singular_pronouns:\n first_person_singular += pronoun_dict[pronoun]\n elif pronoun in first_person_plural_pronouns:\n first_person_plural += pronoun_dict[pronoun]\n elif pronoun in second_person_pronouns:\n second_person += pronoun_dict[pronoun]\n elif pronoun in third_person_singular_pronouns:\n third_person_singular += pronoun_dict[pronoun]\n elif pronoun in third_person_plural_pronouns:\n third_person_plural += pronoun_dict[pronoun]\n total += pronoun_dict[pronoun]\n\n return {\n 'first_person_singular': first_person_singular,\n 'first_person_plural': first_person_plural,\n 'second_person': second_person,\n 'third_person_singular': third_person_singular,\n 'third_person_plural': third_person_plural,\n 'total': total\n }",
"def w2n(word):\n word = re.sub('[^A-Z0-9]', '', word)\n return ''.join([letter_to_number_mapping[x] for x in word])",
"def extract_barcodes_from_mapping(labels):\r\n barcodes = {}\r\n\r\n # use \\w* to allow for non barcoded reads\r\n re = compile(\r\n r'(\\w+) ([a-zA-Z0-9.]+) orig_bc=(\\w*) new_bc=\\w* bc_diffs=\\d+')\r\n for label in labels:\r\n tmatch = search(re, label)\r\n flowgram_id = tmatch.group(2)\r\n barcode = tmatch.group(3)\r\n\r\n barcodes[flowgram_id] = barcode\r\n\r\n return barcodes",
"def word_nbr_map(wl):\n wf = dict()\n for word in wl:\n try:\n wf[word] = wf[word] + 1\n except KeyError:\n wf[word] = 1\n return wf",
"def _create_symbol_mapping():\n normal_items = [\"+\", \"-\"]\n unicode_items = [chr(0x2070 + i) for i in range(10, 12)]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_items, unicode_items))",
"def create_lookup_tables(words):\n\n word_counts = Counter(words)\n sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)\n int_to_vocab = {(ii+1): word for ii, word in enumerate(sorted_vocab)}\n vocab_to_int = {word: (ii+1) for ii, word in int_to_vocab.items()}\n\n return vocab_to_int, int_to_vocab",
"def get_alphas(matrix):\n return {prefix: sum(probabilities.values()) for prefix, probabilities in matrix.items()}",
"def map_word(self, word):\n for invariance in self.invariances:\n word = invariance.map_word(word)\n return word",
"def __init__(self, dictionary):\n self.abbrdict = {} #Use a dict to count the number of same abbreviation.\n self.origdict = {} #Use a dict to rule out duplicate of original words.\n for x in dictionary:\n if len(x) <= 1: #If the length of word is not larger than 1, its abbreviation is itself.\n n = x\n else: #Otherwise get the abbreviation as decribed.\n n = x[0] + str(len(x) - 2) + x[-1]\n if x not in self.origdict: #If this is the 1st time word appears, add the count of abbreviation.\n if n not in self.abbrdict:\n self.abbrdict[n] = 1\n else:\n self.abbrdict[n] += 1\n self.origdict[x] = True #Set it as appeared."
] |
[
"0.596231",
"0.56996095",
"0.56565887",
"0.56336045",
"0.56131876",
"0.5507035",
"0.5407744",
"0.5394013",
"0.5345998",
"0.5345086",
"0.5334324",
"0.53331065",
"0.53101665",
"0.5308835",
"0.52683884",
"0.52648175",
"0.5257812",
"0.5256144",
"0.52431834",
"0.5231941",
"0.52281606",
"0.5224368",
"0.52153313",
"0.52071816",
"0.5201492",
"0.51993006",
"0.5195762",
"0.5182243",
"0.51675475",
"0.51638985"
] |
0.7119786
|
0
|
Constructor. Instantiating an updater object causes all the metadata files for the toplevel roles to be read from disk, including the key and role information for the delegated targets of 'targets'. The actual metadata for delegated roles is not loaded in __init__. The metadata for these delegated roles, including nested delegated roles, are loaded, updated, and saved to the 'self.metadata' store by the target methods, like all_targets() and targets_of_role(). The initial set of metadata files are provided by the software update system utilizing TUF. In order to use an updater, the following directories must already
|
def __init__(self, updater_name, repository_mirrors):
    # Do the arguments have the correct format?
    # These checks ensure the arguments have the appropriate
    # number of objects and object types and that all dict
    # keys are properly named.
    # Raise 'tuf.FormatError' if there is a mismatch.
    tuf.formats.NAME_SCHEMA.check_match(updater_name)
    tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)
    # Save the validated arguments.
    self.name = updater_name
    self.mirrors = repository_mirrors
    # Store the trusted metadata read from disk.
    self.metadata = {}
    # Store the currently trusted/verified metadata.
    self.metadata['current'] = {}
    # Store the previously trusted/verified metadata.
    self.metadata['previous'] = {}
    # Store the file information of all the metadata files. The dict keys are
    # paths, the dict values fileinfo data. This information can help determine
    # whether a metadata file has changed and so needs to be re-downloaded.
    self.fileinfo = {}
    # Store the location of the client's metadata directory.
    self.metadata_directory = {}
    # Ensure the repository metadata directory has been set.
    if tuf.conf.repository_directory is None:
        message = 'The TUF update client module must specify the directory' \
                  ' containing the local repository files.' \
                  ' "tuf.conf.repository_directory" MUST be set.'
        raise tuf.RepositoryError(message)
    # Set the path for the current set of metadata files.
    repository_directory = tuf.conf.repository_directory
    current_path = os.path.join(repository_directory, 'metadata', 'current')
    # Ensure the current path is valid/exists before saving it.
    if not os.path.exists(current_path):
        message = 'Missing '+repr(current_path)+'. This path must exist and, ' \
                  'at a minimum, contain the root metadata file.'
        raise tuf.RepositoryError(message)
    self.metadata_directory['current'] = current_path
    # Set the path for the previous set of metadata files.
    previous_path = os.path.join(repository_directory, 'metadata', 'previous')
    # Ensure the previous path is valid/exists.
    if not os.path.exists(previous_path):
        message = 'Missing '+repr(previous_path)+'. This path must exist.'
        raise tuf.RepositoryError(message)
    self.metadata_directory['previous'] = previous_path
    # Load current and previous metadata.
    for metadata_set in ['current', 'previous']:
        for metadata_role in ['root', 'targets', 'release', 'timestamp']:
            self._load_metadata_from_file(metadata_set, metadata_role)
    # Raise an exception if the repository is missing the required 'root'
    # metadata.
    if 'root' not in self.metadata['current']:
        message = 'No root of trust! Could not find the "root.txt" file.'
        raise tuf.RepositoryError(message)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def refresh(self):\n\n # The timestamp role does not have signed metadata about it; otherwise we\n # would need an infinite regress of metadata. Therefore, we use some\n # default, sane metadata about it.\n DEFAULT_TIMESTAMP_FILEINFO = {\n 'hashes':None,\n 'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH\n }\n\n # Update the top-level metadata. The _update_metadata_if_changed() and\n # _update_metadata() calls below do NOT perform an update if there\n # is insufficient trusted signatures for the specified metadata.\n # Raise 'tuf.NoWorkingMirrorError' if an update fails.\n\n # Use default but sane information for timestamp metadata, and do not\n # require strict checks on its required length.\n self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)\n\n self._update_metadata_if_changed('release', referenced_metadata='timestamp')\n\n self._update_metadata_if_changed('root')\n\n self._update_metadata_if_changed('targets')\n\n # Updated the top-level metadata (which all had valid signatures), however,\n # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata\n # has expired.\n for metadata_role in ['timestamp', 'root', 'release', 'targets']:\n self._ensure_not_expired(metadata_role)",
"def _rebuild_key_and_role_db(self):\n \n # Clobbering this means all delegated metadata files are rendered outdated\n # and will need to be reloaded. However, reloading the delegated metadata\n # files is avoided here because fetching target information with methods\n # like all_targets() and target() always cause a refresh of these files.\n # The metadata files for delegated roles are also not loaded when the\n # repository is first instantiated. Due to this setup, reloading delegated\n # roles is not required here.\n tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])\n tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])",
"def __init__(self,\n root: Path = None,\n resources_dir: Path = None,\n slave_configuration_path : Path = None,\n binaries_dir : Path = None,\n wrapper_win64 : Path = None,\n wrapper_linux64: Path = None,\n main_script_path : Path = None,\n model_description: Path = None,\n model_description_path : Path = None,\n main_script: Path = None,\n main_class : Path = None,\n pyfmu_dir : Path = None\n ):\n self.model_description = model_description\n\n self.main_script = main_script\n self.main_class = main_class\n self.slave_configuration = None\n\n # paths\n self.root = root\n self.resources_dir = resources_dir\n self.slave_configuration_path = slave_configuration_path\n self.main_script_path = main_script_path\n self.model_description_path = model_description_path\n self.binaries_dir = binaries_dir\n self.wrapper_win64 = wrapper_win64\n self.wrapper_linux64 = wrapper_linux64\n self.pyfmu_dir = pyfmu_dir",
"def __init__(self):\n self.mach_files = self._scan_directory()\n self.targets = []\n\n for mach_file in self.mach_files:\n with open(mach_file, 'r') as mfile:\n mach_json = json.load(mfile)\n self._handle_config_file(mach_json, mach_file)\n # Now that we have all of the targets, it's time to resolve dependencies between them\n for target in self.targets:\n def target_finder(name):\n return self.find_target(name)\n target.resolve_dependencies(target_finder)",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def __init__(self, *fnames):\n self.lnames = {}\n for fname in fnames:\n if os.path.isfile(fname):\n with open(fname) as f:\n for line in f:\n if not line.startswith('#') and not line.isspace():\n tokens = line.strip().split()\n if len(tokens) < 4:\n # must have at least one log name\n raise Exception('Targets: invalid target, file = ' + fname + ', line = ' + line)\n\n # prepare data\n target,ra,dec = tokens[:3]\n target = target.replace('~',' ')\n ra,dec,system = str2radec(ra + ' ' + dec)\n names = [token.replace('~',' ') for token in tokens[3:]]\n\n # check that, if the target has been\n # entered before, as is possible, that it\n # is self-consistent\n if target in self:\n entry = self[target]\n if entry['ra'] != ra or entry['dec'] != dec:\n raise Exception(\n 'Targets: file = ' + fname + ', line = ' + line + \\\n '\\nTarget =' + target + ' already has an entry but with a different position.'\n )\n\n # add names to the dictionary maintained to check for uniqueness\n for name in names:\n if name in self.lnames:\n raise Exception(\n 'Targets: file = ' + fname + ', line = ' + line + \\\n '\\nName = ' + name + ' already exists.'\n )\n self.lnames[name] = target\n\n self[target] = {'ra' : ra, 'dec' : dec, 'names' : names}\n\n print(len(self),'targets after loading',fname)\n else:\n print('No targets loaded from',fname,'as it does not exist.')",
"def __init__(self, *args, **kws):\n from PyInstaller.config import CONF\n Target.__init__(self)\n self.strip_binaries = kws.get('strip', False)\n self.upx_exclude = kws.get(\"upx_exclude\", [])\n self.console = True\n self.target_arch = None\n self.codesign_identity = None\n self.entitlements_file = None\n\n if CONF['hasUPX']:\n self.upx_binaries = kws.get('upx', False)\n else:\n self.upx_binaries = False\n\n self.name = kws.get('name')\n # Old .spec format included in 'name' the path where to collect files for the created app. app. New format\n # includes only directory name.\n #\n # The 'name' directory is created in DISTPATH and necessary files are then collected to this directory.\n self.name = os.path.join(CONF['distpath'], os.path.basename(self.name))\n\n self.toc = TOC()\n for arg in args:\n if isinstance(arg, TOC):\n self.toc.extend(arg)\n elif isinstance(arg, Target):\n self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))\n if isinstance(arg, EXE):\n self.console = arg.console\n self.target_arch = arg.target_arch\n self.codesign_identity = arg.codesign_identity\n self.entitlements_file = arg.entitlements_file\n for tocnm, fnm, typ in arg.toc:\n if tocnm == os.path.basename(arg.name) + \".manifest\":\n self.toc.append((tocnm, fnm, typ))\n if not arg.append_pkg:\n self.toc.append((os.path.basename(arg.pkgname), arg.pkgname, 'PKG'))\n self.toc.extend(arg.dependencies)\n else:\n self.toc.extend(arg)\n self.__postinit__()",
"def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()",
"def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')",
"def _metadata_update_targets(targets):\n affidavit = _create_affidavit()\n firmwares = db.firmware.get_all()\n for target in targets:\n firmwares_filtered = []\n for f in firmwares:\n if f.target == 'private':\n continue\n if f.target != target:\n continue\n firmwares_filtered.append(f)\n if target == 'stable':\n _generate_metadata_kind('firmware.xml.gz',\n firmwares_filtered,\n affidavit=affidavit)\n elif target == 'testing':\n _generate_metadata_kind('firmware-testing.xml.gz',\n firmwares_filtered,\n affidavit=affidavit)",
"def setup_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()\n if not os.path.exists(self.plaintext_directory):\n os.makedirs(self.plaintext_directory)\n\n if not os.path.exists(self.training_path):\n os.makedirs(self.training_path)\n\n if not os.path.exists(self.heldout_path):\n os.makedirs(self.heldout_path)\n\n prepare_data(self.paths)",
"def setup_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()\n if not os.path.exists(self.plaintext_directory):\n os.makedirs(self.plaintext_directory)\n\n if not os.path.exists(self.training_path):\n os.makedirs(self.training_path)\n\n if not os.path.exists(self.heldout_path):\n os.makedirs(self.heldout_path)",
"def _import_delegations(self, parent_role):\n \n current_parent_metadata = self.metadata['current'][parent_role]\n \n if 'delegations' not in current_parent_metadata:\n return\n\n # This could be quite slow with a huge number of delegations.\n keys_info = current_parent_metadata['delegations'].get('keys', {})\n roles_info = current_parent_metadata['delegations'].get('roles', [])\n\n logger.debug('Adding roles delegated from '+repr(parent_role)+'.')\n \n # Iterate through the keys of the delegated roles of 'parent_role'\n # and load them.\n for keyid, keyinfo in keys_info.items():\n if keyinfo['keytype'] in ['rsa', 'ed25519']:\n key = tuf.keys.format_metadata_to_key(keyinfo)\n \n # We specify the keyid to ensure that it's the correct keyid\n # for the key.\n try:\n tuf.keydb.add_key(key, keyid)\n except tuf.KeyAlreadyExistsError:\n pass\n except (tuf.FormatError, tuf.Error), e:\n logger.exception('Failed to add keyid: '+repr(keyid)+'.')\n logger.error('Aborting role delegation for parent role '+parent_role+'.')\n raise\n else:\n logger.warn('Invalid key type for '+repr(keyid)+'.')\n continue\n\n # Add the roles to the role database.\n for roleinfo in roles_info:\n try:\n # NOTE: tuf.roledb.add_role will take care\n # of the case where rolename is None.\n rolename = roleinfo.get('name')\n logger.debug('Adding delegated role: '+str(rolename)+'.')\n tuf.roledb.add_role(rolename, roleinfo)\n except tuf.RoleAlreadyExistsError, e:\n logger.warn('Role already exists: '+rolename)\n except:\n logger.exception('Failed to add delegated role: '+rolename+'.')\n raise",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def __init__(self,\n alternate_restore_base_directory=None,\n continue_on_error=None,\n encryption_enabled=None,\n generate_ssh_keys=None,\n override_originals=None,\n preserve_acls=None,\n preserve_attributes=None,\n preserve_timestamps=None,\n restore_entities=None,\n restore_to_original_paths=None,\n save_success_files=None,\n skip_estimation=None,\n ):\n\n # Initialize members of the class\n self.alternate_restore_base_directory = alternate_restore_base_directory\n self.continue_on_error = continue_on_error\n self.encryption_enabled = encryption_enabled\n self.generate_ssh_keys = generate_ssh_keys\n self.override_originals = override_originals\n self.preserve_acls = preserve_acls\n self.preserve_attributes = preserve_attributes\n self.preserve_timestamps = preserve_timestamps\n self.restore_entities = restore_entities\n self.restore_to_original_paths = restore_to_original_paths\n self.save_success_files = save_success_files\n self.skip_estimation = skip_estimation",
"def setup(self):\n # Initialize key variables\n valid = True\n updated_list = []\n config = copy.deepcopy(self.config)\n directory = self.directories[0]\n\n # Update log_directory and ingest_cache_directory\n if isinstance(config, dict) is True:\n if 'main' in config:\n # Setup the log_directory to a known good default\n (updated, config) = self._create_directory_entries(\n 'log_directory', config)\n updated_list.append(updated)\n\n # Setup the ingest_cache_directory to a known good default\n (updated, config) = self._create_directory_entries(\n 'ingest_cache_directory', config)\n updated_list.append(updated)\n\n else:\n valid = False\n else:\n valid = False\n\n # Gracefully exit if things are not OK\n if valid is False:\n log_message = (\n 'Configuration files found in {} is invalid'\n ''.format(self.directories))\n log.log2die_safe(1007, log_message)\n\n # Update configuration file if required\n if len(updated_list) == updated_list.count(True):\n for next_directory in self.directories:\n # Delete all YAML files in the directory\n general.delete_yaml_files(next_directory)\n\n # Write config back to directory\n filepath = ('%s/config.yaml') % (directory)\n with open(filepath, 'w') as outfile:\n yaml.dump(config, outfile, default_flow_style=False)",
"def _setup_target_updates(model_scope, target_scope, scope, tau, verbose):\n if scope is not None:\n model_scope = scope + '/' + model_scope\n target_scope = scope + '/' + target_scope\n\n return get_target_updates(\n get_trainable_vars(model_scope),\n get_trainable_vars(target_scope),\n tau, verbose)",
"def __init__(self, data_file_paths, targets, transform=None):\n \n self.transform = transform\n self.data_file_paths = data_file_paths\n self.targets = targets\n return",
"def assign_targets(classes, source_dir):\n for cl in classes.values():\n cl['target'] = get_target(source_dir, cl['filepath'])",
"def __init__(self, source, target, method, chmod=0777):\n self.source = source\n self.target = target\n self.directory = self.target\n self.method = method\n self._path()\n self.chmod = chmod",
"def _load_metadata_from_file(self, metadata_set, metadata_role):\n\n # Ensure we have a valid metadata set.\n if metadata_set not in ['current', 'previous']:\n raise tuf.Error('Invalid metadata set: '+repr(metadata_set))\n\n # Save and construct the full metadata path.\n metadata_directory = self.metadata_directory[metadata_set]\n metadata_filename = metadata_role + '.txt'\n metadata_filepath = os.path.join(metadata_directory, metadata_filename)\n \n # Ensure the metadata path is valid/exists, else ignore the call. \n if os.path.exists(metadata_filepath):\n # Load the file. The loaded object should conform to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n metadata_signable = tuf.util.load_json_file(metadata_filepath)\n\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Extract the 'signed' role object from 'metadata_signable'.\n metadata_object = metadata_signable['signed']\n \n # Save the metadata object to the metadata store.\n self.metadata[metadata_set][metadata_role] = metadata_object\n \n # We need to rebuild the key and role databases if \n # metadata object is 'root' or target metadata.\n if metadata_set == 'current':\n if metadata_role == 'root':\n self._rebuild_key_and_role_db()\n elif metadata_object['_type'] == 'Targets':\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def __init__(self):\n if ModelUpdater._instance_ is not None:\n raise GenericRolloutException(\"Attempting to construct multiple ModelUpdater\")\n\n # Wait for required services to be available\n rospy.wait_for_service(SET_MODEL_STATE)\n # Wait for gazebo plugin services to be available\n for service in GazeboServiceName:\n if service.name in GAZEBO_SERVICES:\n rospy.wait_for_service(service.value)\n # Gazebo service that allows us to position the car\n self._model_state_client = ServiceProxyWrapper(SET_MODEL_STATE, SetModelState)\n\n self._get_model_prop = ServiceProxyWrapper(GazeboServiceName.GET_MODEL_PROPERTIES.value,\n GetModelProperties)\n self._get_visual_names = ServiceProxyWrapper(GazeboServiceName.GET_VISUAL_NAMES.value,\n GetVisualNames)\n self._get_visuals = ServiceProxyWrapper(GazeboServiceName.GET_VISUALS.value, GetVisuals)\n self._set_visual_colors = ServiceProxyWrapper(GazeboServiceName.SET_VISUAL_COLORS.value,\n SetVisualColors)\n self._set_visual_visibles = ServiceProxyWrapper(GazeboServiceName.SET_VISUAL_VISIBLES.value,\n SetVisualVisibles)\n self._set_visual_transparencies = ServiceProxyWrapper(GazeboServiceName.SET_VISUAL_TRANSPARENCIES.value,\n SetVisualTransparencies)\n self._pause_physics = ServiceProxyWrapper(GazeboServiceName.PAUSE_PHYSICS.value, Empty)\n self._unpause_physics = ServiceProxyWrapper(GazeboServiceName.UNPAUSE_PHYSICS.value, Empty)\n self._set_model_state_tracker = SetModelStateTracker.get_instance()\n self._get_model_state_tracker = GetModelStateTracker.get_instance()\n # there should be only one model updater instance\n ModelUpdater._instance_ = self",
"def __init__(self, *tocs, **kwargs):\n\n from PyInstaller.config import CONF\n Target.__init__(self)\n name = kwargs.get('name', None)\n cipher = kwargs.get('cipher', None)\n self.toc = TOC()\n # If available, use code objects directly from ModuleGraph to speed up PyInstaller.\n self.code_dict = {}\n for t in tocs:\n self.toc.extend(t)\n self.code_dict.update(getattr(t, '_code_cache', {}))\n\n self.name = name\n if name is None:\n self.name = os.path.splitext(self.tocfilename)[0] + '.pyz'\n # PyInstaller bootstrapping modules.\n self.dependencies = get_bootstrap_modules()\n # Bundle the crypto key.\n self.cipher = cipher\n if cipher:\n key_file = ('pyimod00_crypto_key', os.path.join(CONF['workpath'], 'pyimod00_crypto_key.pyc'), 'PYMODULE')\n # Insert the key as the first module in the list. The key module contains just variables and does not depend\n # on other modules.\n self.dependencies.insert(0, key_file)\n # Compile the top-level modules so that they end up in the CArchive and can be imported by the bootstrap script.\n self.dependencies = misc.compile_py_files(self.dependencies, CONF['workpath'])\n self.__postinit__()",
"def __init__(self, dataset: Dataset, targets_file: str = os.path.join('data', 'targets.pkl')):\n self.dataset = dataset\n with open(targets_file, 'rb') as f:\n target_data = pickle.load(f)\n self.targets = target_data",
"def __init__(self, roles, role):\n self._roles = roles\n self._requestor = self._roles._requestor\n self._id = role[\"id\"]\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])",
"def __init__(self, repo=None, home=os.getcwd(), new_repo=False, **kwargs):\r\n\r\n\r\n self.repo = repo\r\n self.file_home = Path(home) # Home of the file calling this class\r\n self.managementlog = LogIt().default(logname=\"Management\", logfile=None)\r\n\r\n # Below are path-like attributes that map various modules and directories.\r\n # Cookies Module:\r\n self.Kitchen = Oven(repo=self.repo, output_dir=self.file_home)\r\n self.Pantry = self.Kitchen.Recipes\r\n # Manager Module:\r\n self.Manager = Path(pkg_resources.resource_filename(Manager.__name__, ''))\r\n self.BioSQL = self.Manager / Path('BioSQL')\r\n self.SQLite3 = self.BioSQL / Path('sqlite')\r\n self.MySQL = self.BioSQL / Path('mysql')\r\n self.config = self.Manager / Path('config')\r\n # Orthologs Module:\r\n self.Orthologs = Path(pkg_resources.resource_filename(Orthologs.__name__, ''))\r\n self.Align = self.Orthologs / Path('Align')\r\n self.Blast = self.Orthologs / Path('Blast')\r\n self.GenBank = self.Orthologs / Path('GenBank')\r\n self.Phylogenetics = self.Orthologs / Path('Phylogenetics')\r\n # Tools Module:\r\n self.Tools = Path(pkg_resources.resource_filename(Tools.__name__, ''))\r\n self.ftp = self.Tools / Path('ftp')\r\n self.logit = self.Tools / Path('logit')\r\n self.mpi = self.Tools / Path('mpi')\r\n self.mygene = self.Tools / Path('mygene')\r\n self.pandoc = self.Tools / Path('pandoc')\r\n self.parallel = self.Tools / Path('parallel')\r\n self.pybasher = self.Tools / Path('pybasher')\r\n self.send2server = self.Tools / Path('send2server')\r\n self.sge = self.Tools / Path('sge')\r\n self.slackify = self.Tools / Path('slackify')\r\n self.otherutils = self.Tools / Path('otherutils')\r\n\r\n if self.repo:\r\n self.repo_path = self.file_home / Path(self.repo)\r\n self.managementlog.info('The BaseManagement class variables have been set.')\r\n\r\n # Make a new repository.\r\n if new_repo is True:\r\n self.managementlog.info('The repository cookie is being prepared for the Oven.')\r\n self.Kitchen.bake_the_repo()",
"def __init__(self, datacfg_file=None, envcfg_file=None, resources_file=None):\n\n super(LoadHandler, self).__init__(datacfg_file, envcfg_file, resources_file)\n\n logkv(logger, {\"msg\": \"Starting load\",\n \"dataset\": self.get_config(\"dataset_name\")}, \"info\")\n\n # Initialize fields that will be filled in methods of this class.\n self.propfile = None\n self.newdirs = None\n self.locked = False\n self.load_type = None\n\n # Get primary HDFS namenode before proceeding with load\n namenodes = self.get_config(\"webhdfs_root\").split(\",\")\n try:\n self.primary_namenode = \\\n self.hdfs_mgr.get_primary_namenode(namenodes,\n self.get_config(\"hdfs_root\"),\n self.get_config(\"hdfs_user\"))\n except HdfsManagerException as ex:\n logkv(logger, {\"msg\": \"Failed to get primary namenode\"}, \"error\", ex)\n raise LoadHandlerException()\n\n # Instantiate Oozie manager\n self.oozie_mgr = OozieManager()\n\n # Get folder processing delay\n try:\n self.process_delay = float(self.get_config(\"folder_processing_delay\"))\n except ValueError as ex:\n logkv(logger,\n {\"msg\": \"Could not parse folder_processing_delay as a float\"},\n \"error\")\n raise LoadHandlerException()\n\n # # Instantiate NewRelic manager\n # try:\n # self.newrelic_mgr = NewRelicManager(self.get_config(\"newrelic_api_key\", configtype=\"env\"),\n # self.get_config(\"newrelic_dataset_name\"),\n # self.get_config(\"newrelic_url\"))\n #\n # except Exception as ex:\n # logkv(logger, {\"msg\": \"Failed to initialize NewRelic Manager\"}, \"error\")\n # raise LoadHandlerException()",
"def __init__(self, name, has_python, skip_java, skip_scala, root_dir,\r\n checkstyle_suppression_files, debug_port, targets, transitive, workunit_factory):\r\n\r\n self.name = name\r\n self.root_dir = root_dir\r\n self.targets = OrderedSet(targets)\r\n self.transitive = transitive\r\n self.workunit_factory = workunit_factory\r\n\r\n self.sources = []\r\n self.py_sources = []\r\n self.py_libs = []\r\n self.resource_extensions = set()\r\n\r\n self.has_python = has_python\r\n self.skip_java = skip_java\r\n self.skip_scala = skip_scala\r\n self.has_scala = False\r\n self.has_tests = False\r\n\r\n self.checkstyle_suppression_files = checkstyle_suppression_files # Absolute paths.\r\n self.debug_port = debug_port\r\n\r\n self.internal_jars = OrderedSet()\r\n self.external_jars = OrderedSet()"
] |
[
"0.67134696",
"0.58107144",
"0.57709205",
"0.5663369",
"0.56254727",
"0.5605019",
"0.560434",
"0.5399462",
"0.5245718",
"0.52128386",
"0.5182947",
"0.5175609",
"0.516285",
"0.5137553",
"0.5126536",
"0.5122762",
"0.5112088",
"0.5105605",
"0.5076615",
"0.5076116",
"0.50703824",
"0.50592744",
"0.5050742",
"0.50381225",
"0.5034722",
"0.50225073",
"0.5020437",
"0.49945077",
"0.49633077",
"0.49341932"
] |
0.67343026
|
0
|
Load current or previous metadata if there is a local file. If the expected file belonging to 'metadata_role' (e.g., 'root.txt') cannot be loaded, raise an exception. The extracted metadata object loaded from file is saved to the metadata store (i.e., self.metadata).
|
def _load_metadata_from_file(self, metadata_set, metadata_role):
    # Ensure we have a valid metadata set.
    if metadata_set not in ['current', 'previous']:
        raise tuf.Error('Invalid metadata set: '+repr(metadata_set))
    # Save and construct the full metadata path.
    metadata_directory = self.metadata_directory[metadata_set]
    metadata_filename = metadata_role + '.txt'
    metadata_filepath = os.path.join(metadata_directory, metadata_filename)
    # Ensure the metadata path is valid/exists, else ignore the call.
    if os.path.exists(metadata_filepath):
        # Load the file. The loaded object should conform to
        # 'tuf.formats.SIGNABLE_SCHEMA'.
        metadata_signable = tuf.util.load_json_file(metadata_filepath)
        tuf.formats.check_signable_object_format(metadata_signable)
        # Extract the 'signed' role object from 'metadata_signable'.
        metadata_object = metadata_signable['signed']
        # Save the metadata object to the metadata store.
        self.metadata[metadata_set][metadata_role] = metadata_object
        # We need to rebuild the key and role databases if
        # metadata object is 'root' or target metadata.
        if metadata_set == 'current':
            if metadata_role == 'root':
                self._rebuild_key_and_role_db()
            elif metadata_object['_type'] == 'Targets':
                # TODO: Should we also remove the keys of the delegated roles?
                tuf.roledb.remove_delegated_roles(metadata_role)
                self._import_delegations(metadata_role)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _load(self) -> None:\n try:\n self._logger.debug('Load metafile %s.', self._path)\n with codecs.open(self._path, 'r', 'utf-8') as ff:\n self._data = json.load(ff)\n # TODO Validate Meta Dict\n except OSError as ex:\n msg = 'Unable to open the metadata file \"{}\". {}'.format(self._path, ex.strerror)\n raise MetaFileError(msg) from ex\n except ValueError as ex:\n msg = 'Unable to load the metadata file \"{}\". AttributeError: {}'.format(self._path, ex)\n raise MetaFileError(msg) from ex",
"def _load_from_file(self):\n try:\n self.logger.debug('Load metafile %s.', self.meta_file_path)\n with codecs.open(self.meta_file_path, 'r', 'utf-8') as meta_file:\n self._meta_dict = json.load(meta_file)\n # TODO Validate Meta Dict\n except OSError as ex:\n raise MetadataError('Unable to open the metadata file \"{}\". {}'\n .format(self.meta_file_path, ex.strerror)) from ex\n except ValueError as ex:\n raise MetadataError(\n 'Unable to load the metadata file \"{}\". AttributeError: {}'\n .format(self.meta_file_path, ex)) from ex",
"def load(self, metadata):\n raise NotImplemented()",
"def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata",
"def _fetch_current_local_metadata():\n if not os.path.exists(LOCAL_METADATA_FILE):\n return {}\n\n with open(LOCAL_METADATA_FILE) as f:\n return json.loads(f.read())",
"def read_metadata(self, file=None):\n if file is None:\n file = self.meta_data_file\n\n try:\n self.meta_data = self.input_dataframe(file, index_col=None)\n except IOError:\n self.meta_data = self.create_default_meta_data(self.expression_matrix)",
"def _load_metadata(self, datapath):\n try:\n metadata = Metadata(datapath)\n return metadata\n except RuntimeError:\n print('Metadata does not exist. Please double check your datapath.')\n return None",
"def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })",
"def load(self) -> dict:\n if not os.path.exists(self.file_path):\n logger.error('Could not find meta file {}'.format(self.file_path))\n raise Exception()\n with open(self.file_path, encoding='utf-8') as meta_file:\n return json.loads(meta_file.read())",
"def from_file(self, filename):\n if os.path.exists(filename): # read the image from file\n self.image = io.imread(filename)\n self.filename = filename\n if os.path.exists(filename[:-4] + '.csv'):\n self.metadata.read_from_csv(filename[:-4] + '.csv')\n else:\n warnings.warn(\"Metadata file is not found!\", Warning)\n else:\n raise ValueError('File does not exist!')",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def _ReadStorageMetadata(self):\n stream_name = 'metadata.txt'\n if not self._HasStream(stream_name):\n return False\n\n stream_data = self._ReadStream(stream_name)\n\n storage_metadata_reader = _StorageMetadataReader()\n storage_metadata = storage_metadata_reader.Read(stream_data)\n\n ZIPStorageFile._CheckStorageMetadata(storage_metadata)\n\n self.format_version = storage_metadata.format_version\n self.serialization_format = storage_metadata.serialization_format\n self.storage_type = storage_metadata.storage_type\n\n return True",
"def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def load(self):\n if not path.exists('service.json'):\n raise UserError('service.json not found')\n with open('service.json') as f:\n try:\n metadata = json.loads(f.read())\n except Exception as e:\n raise UserError('malformed service.json - ' + str(e))\n return metadata",
"def unsafely_get_metadata_file(self, metadata_role, metadata_filepath,\n compressed_file_length):\n\n def unsafely_verify_uncompressed_metadata_file(metadata_file_object):\n self.__soft_check_compressed_file_length(metadata_file_object,\n compressed_file_length)\n self.__verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n return self.__get_file(metadata_filepath,\n unsafely_verify_uncompressed_metadata_file, 'meta',\n compressed_file_length, download_safely=False,\n compression=None)",
"def _fake_meta(self):\n resp = tju.load_file(UPLOADED_FILE, self.adpt)\n return vf.File.wrap(resp)",
"def _update_fileinfo(self, metadata_filename):\n \n # In case we delayed loading the metadata and didn't do it in\n # __init__ (such as with delegated metadata), then get the file\n # info now.\n \n # Save the path to the current metadata file for 'metadata_filename'.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n # If the path is invalid, simply return and leave fileinfo unset.\n if not os.path.exists(current_filepath):\n self.fileinfo[current_filepath] = None\n return\n \n # Extract the file information from the actual file and save it\n # to the fileinfo store.\n file_length, hashes = tuf.util.get_file_details(current_filepath)\n metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)\n self.fileinfo[metadata_filename] = metadata_fileinfo",
"def metadata_file(self):\n return self._metadata_file",
"def safely_get_metadata_file(self, metadata_role, metadata_filepath,\n compressed_file_length,\n uncompressed_file_hashes, compression):\n\n def safely_verify_uncompressed_metadata_file(metadata_file_object):\n self.__hard_check_compressed_file_length(metadata_file_object,\n compressed_file_length)\n self.__check_hashes(metadata_file_object, uncompressed_file_hashes)\n self.__verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n return self.__get_file(metadata_filepath,\n safely_verify_uncompressed_metadata_file, 'meta',\n compressed_file_length, download_safely=True,\n compression=compression)",
"def extract_metadata(self):\n metadata_file_path = self.create_metadata_file(\".metadata.txt\")\n mt = self.mimetype\n metadata_processing_method = self.metadata_mimetype_methods.get(mt)\n if metadata_processing_method:\n # TODO: should we return metadata and write it here instead of in processing method?\n metadata_processing_method(metadata_file_path)",
"def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))",
"def _load_metadata(self, result_dir: Path) -> str:\n id_path = result_dir / SerializationAttributes.ID_FILENAME\n with open(id_path, 'r') as f:\n self.id = json.load(f)[SerializationAttributes.ID_KEY]\n\n version_path = result_dir / SerializationAttributes.VERSION_FILENAME\n with open(version_path, 'r') as f:\n self.version = json.load(f)[SerializationAttributes.VERSION_KEY]",
"def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)",
"def __verify_uncompressed_metadata_file(self, metadata_file_object,\n metadata_role):\n\n metadata = metadata_file_object.read()\n try:\n metadata_signable = tuf.util.load_json_string(metadata)\n except Exception, exception:\n raise tuf.InvalidMetadataJSONError(exception)\n else:\n # Ensure the loaded 'metadata_signable' is properly formatted.\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Is 'metadata_signable' newer than the currently installed\n # version?\n current_metadata_role = self.metadata['current'].get(metadata_role)\n\n # Compare metadata version numbers. Ensure there is a current\n # version of the metadata role to be updated.\n if current_metadata_role is not None:\n current_version = current_metadata_role['version']\n downloaded_version = metadata_signable['signed']['version']\n if downloaded_version < current_version:\n raise tuf.ReplayedMetadataError(metadata_role, downloaded_version,\n current_version)\n\n # Reject the metadata if any specified targets are not allowed.\n if metadata_signable['signed']['_type'] == 'Targets':\n self._ensure_all_targets_allowed(metadata_role,\n metadata_signable['signed'])\n\n # Verify the signature on the downloaded metadata object.\n valid = tuf.sig.verify(metadata_signable, metadata_role)\n if not valid:\n raise tuf.BadSignatureError(metadata_role)",
"def read_data(self):\n\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print(\n \"# The file {} belonging to {} do not exist.\".format(\n file_error.filename, self.fName))",
"def import_file(self) -> pulumi.Input['FileMetadataArgs']:\n return pulumi.get(self, \"import_file\")",
"def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True",
"def load_from_file(cls, file=None, file_path=None):\n if not file:\n file = open(file_path, 'r') \n if not file_path:\n file_path = file.name\n with file:\n file_meta = cls._get_file_meta(file, file_path=file_path)\n cls_properties = dict([[p, file_meta.get(p, None)] for p in cls.properties()])\n cls(key_name=file_path, **cls_properties).put()"
] |
[
"0.6726712",
"0.66560674",
"0.61084676",
"0.60199374",
"0.59750575",
"0.5961096",
"0.59146667",
"0.57406485",
"0.5625859",
"0.56245166",
"0.5618372",
"0.55644155",
"0.55479753",
"0.551915",
"0.5503802",
"0.5485509",
"0.54633296",
"0.5457984",
"0.5455698",
"0.5450701",
"0.5427472",
"0.5424017",
"0.5409404",
"0.5383026",
"0.53664434",
"0.53413534",
"0.532233",
"0.5285635",
"0.52740705",
"0.52307075"
] |
0.74305725
|
0
|
Rebuild the key and role databases from the currently trusted 'root' metadata object extracted from 'root.txt'. This private function is called when a new/updated 'root' metadata file is loaded. It only stores role information for the top-level roles (i.e., 'root', 'targets', 'release', 'timestamp'). Returns None.
|
def _rebuild_key_and_role_db(self):
  # Clobbering this means all delegated metadata files are rendered outdated
  # and will need to be reloaded. However, reloading the delegated metadata
  # files is avoided here because fetching target information with methods
  # like all_targets() and target() always cause a refresh of these files.
  # The metadata files for delegated roles are also not loaded when the
  # repository is first instantiated. Due to this setup, reloading delegated
  # roles is not required here.
  tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])
  tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])
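
The two calls above repopulate all key and role state from the trusted 'root' object. As a rough illustration of what that entails, the sketch below models the key and role databases as plain dictionaries; the function name, field names, and the root_metadata layout are assumptions based on the general shape of TUF root metadata, not the actual internals of tuf.keydb or tuf.roledb.

# Illustrative only: a stripped-down model of rebuilding key/role state
# from trusted root metadata. The real tuf.keydb/tuf.roledb modules do
# considerably more (schema checks, keyid handling, etc.).
def rebuild_key_and_role_db(root_metadata):
  # Clobber any previous state; everything is re-derived from 'root'.
  keydb = {}
  roledb = {}

  # 'keys' maps a keyid to its key metadata (keytype, keyval, ...).
  for keyid, keyinfo in root_metadata.get('keys', {}).items():
    keydb[keyid] = keyinfo

  # 'roles' maps each top-level rolename to the keyids trusted to sign it
  # and the signature threshold required.
  for rolename, roleinfo in root_metadata.get('roles', {}).items():
    roledb[rolename] = {'keyids': roleinfo['keyids'],
                        'threshold': roleinfo['threshold']}

  return keydb, roledb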
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load(self):\n self.root = self._load()\n\n if self.ignore_case_in_keys:\n self.root = self._convert_keys_to_lower(self.root)",
"def refresh(self):\n\n # The timestamp role does not have signed metadata about it; otherwise we\n # would need an infinite regress of metadata. Therefore, we use some\n # default, sane metadata about it.\n DEFAULT_TIMESTAMP_FILEINFO = {\n 'hashes':None,\n 'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH\n }\n\n # Update the top-level metadata. The _update_metadata_if_changed() and\n # _update_metadata() calls below do NOT perform an update if there\n # is insufficient trusted signatures for the specified metadata.\n # Raise 'tuf.NoWorkingMirrorError' if an update fails.\n\n # Use default but sane information for timestamp metadata, and do not\n # require strict checks on its required length.\n self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)\n\n self._update_metadata_if_changed('release', referenced_metadata='timestamp')\n\n self._update_metadata_if_changed('root')\n\n self._update_metadata_if_changed('targets')\n\n # Updated the top-level metadata (which all had valid signatures), however,\n # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata\n # has expired.\n for metadata_role in ['timestamp', 'root', 'release', 'targets']:\n self._ensure_not_expired(metadata_role)",
"def _mv_to_root(map):\n if METADATA_KEY in map:\n for mk in list(map[METADATA_KEY].keys()):\n if mk not in map:\n map[mk] = map[METADATA_KEY][mk]\n del map[METADATA_KEY][mk]\n _LOGGER.debug(\"Section {m}.{k} moved to {k}\".\n format(m=METADATA_KEY, k=mk))\n del self[CONFIG_KEY][METADATA_KEY]",
"def init_db_root(db_root = None,whitelist = (KeyboardInterrupt,)):\n\n erase = False\n if db_root is None:\n db_root = tempfile.mkdtemp()\n erase = True\n else:\n db_root = db_root\n whitelist = whitelist\n try:\n yield db_root\n except Exception as e:\n if issubclass(type(e),whitelist):\n erase = True\n else:\n erase = False\n raise\n finally:\n if erase:\n shutil.rmtree(db_root)",
"def rootdesc(data, dbname, initialpassword=None):\n debug(\"rootdesc(..data..,{0})\".format(safestr(dbname)))\n # pylint: disable=bad-continuation\n return {\n 'database': dbname,\n 'host': hostportion(data['rw']),\n 'port': portportion(data['rw']),\n 'user': 'postgres',\n 'password': initialpassword if initialpassword else getpass(data, 'postgres', data['rw'], 'postgres')\n }",
"def test_get_root_role(self):\n root = role_middleware.get_root()\n print(root.name, root.id)",
"def modify_base_buildroot(self):\n if \"'%s '\" % self.buildroot_pkgs != pipes.quote(str(self.buildroot_pkgs)+' '):\n # just different test if it contains only alphanumeric characters allowed in packages name\n raise BuilderError(\"Do not try this kind of attack on me\")\n self.root_conn.module_name = \"lineinfile\"\n self.root_conn.module_args = \"\"\"dest=/etc/mock/%s.cfg line=\"config_opts['chroot_setup_cmd'] = 'install @buildsys-build %s'\" regexp=\"^.*chroot_setup_cmd.*$\" \"\"\" % (self.chroot, self.buildroot_pkgs)\n self.mockremote.callback.log('putting %s into minimal buildroot of %s' % (self.buildroot_pkgs, self.chroot))\n results = self.root_conn.run()\n\n is_err, err_results = check_for_ans_error(results, self.hostname, success_codes=[0],\n return_on_error=['stdout', 'stderr'])\n if is_err:\n self.mockremote.callback.log(\"Error: %s\" % err_results)\n myresults = get_ans_results(results, self.hostname)\n self.mockremote.callback.log(\"%s\" % myresults)",
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def rebuild(self):\n if self.drop_tables:\n self._drop_tables()\n\n # reload meta.json into db\n self._meta_json_to_database()\n\n processed_data_ids = []\n\n # Iterate through each row in the manifest then clean and validate\n for manifest_row in self.manifest:\n # Note: Incompletely filled out rows in the manifest can break the\n # other code\n # TODO: figure out a way to flag this issue early in loading\n # TODO: of manifest\n\n # only clean and validate data files flagged for use in database\n if manifest_row['include_flag'] == 'use':\n logging.info(\"{}: preparing to load row {} from the manifest\".\n format(manifest_row['unique_data_id'],\n len(self.manifest)))\n\n self._process_data_file(manifest_row=manifest_row)\n\n processed_data_ids.append(manifest_row['unique_data_id'])\n\n # build Zone Facts table\n self._create_zone_facts_table()\n\n return processed_data_ids",
"def test_update_config_root(self):\n config_root = self._create_config_root()\n config_root_uuid = config_root['config-root']['uuid']\n updated_name = data_utils.rand_name('new_config_root')\n with self.override_role():\n self.config_client.update_config_root(\n config_root_uuid, display_name=updated_name)",
"def define_root(self, username, password):\n\n self._db_manager.set_root(username, password)",
"def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)",
"def _rebuild_rpm_db(context, root=None):\n base_cmd = ['rpmdb', '--rebuilddb']\n cmd = base_cmd if not root else base_cmd + ['-r', root]\n context.call(cmd)",
"def masterRoles():\r\n document.add_heading('Node Roles', 1)\r\n role= get_qlik_sense.masterRoles()\r\n num_of_nodes = len(role)\r\n table = document.add_table(rows=num_of_nodes+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row = table.rows[0]\r\n row.cells[0].text = 'Host name'\r\n row.cells[1].text = 'Roles'\r\n for node in range(num_of_nodes):\r\n row = table.rows[node+1]\r\n row.cells[0].text = str(role[node][0])\r\n row.cells[1].text = str(role[node][1])",
"def __revert_terminal_variables(root):\n var_to_terminal = lambda var_name: var_name[len(TERMINAL_VAR_PREFIX):].lower()\n for node in root.preorder():\n if node.key.startswith(TERMINAL_VAR_PREFIX):\n node.key = var_to_terminal(node.key)\n node.children = []",
"def reindex(self):\n print \"REINDEXING rack namespace %r\" % (self.path,)\n\n # First scan the db, building key and subns lists in memory.\n # We don't want to modify the db until we're done iterating over it.\n keyList = []\n subNsList = []\n internalNsKeys = []\n\n for dbkey in self.db:\n path, internalNs, key = self.serializer.loadKey(dbkey)\n\n # Is this one of our keys?\n if path == self.path:\n if internalNs is None:\n if not key in keyList:\n keyList.append(key)\n else:\n # It's a key in one of our internal namespaces, make note of it\n internalNsKeys.append(dbkey)\n\n # Is it a child of our path?\n if len(path) > len(self.path) and path[:-1] == self.path:\n name = path[len(self.path)]\n if not name in subNsList:\n subNsList.append(name)\n\n # Now delete all of our internal namespace keys, nuking our possibly\n # corrupted namespace and key lists.\n for key in internalNsKeys:\n del self.db[key]\n\n # Rebuild fresh linked lists of our keys and subnamespaces\n l = self._getSubNsList()\n for subNs in subNsList:\n l.append(subNs)\n l = self._getKeyList()\n for key in keyList:\n l.append(key)",
"def read_root():\n return {\"Hello\":\"World!\"}",
"def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if \"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could 
not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 
0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update",
"def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n self.pre = None\n self.m1 = None\n self.m2 = None\n self.helper(root)\n self.m1.val,self.m2.val = self.m2.val, self.m1.val",
"def _load_metadata_from_file(self, metadata_set, metadata_role):\n\n # Ensure we have a valid metadata set.\n if metadata_set not in ['current', 'previous']:\n raise tuf.Error('Invalid metadata set: '+repr(metadata_set))\n\n # Save and construct the full metadata path.\n metadata_directory = self.metadata_directory[metadata_set]\n metadata_filename = metadata_role + '.txt'\n metadata_filepath = os.path.join(metadata_directory, metadata_filename)\n \n # Ensure the metadata path is valid/exists, else ignore the call. \n if os.path.exists(metadata_filepath):\n # Load the file. The loaded object should conform to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n metadata_signable = tuf.util.load_json_file(metadata_filepath)\n\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Extract the 'signed' role object from 'metadata_signable'.\n metadata_object = metadata_signable['signed']\n \n # Save the metadata object to the metadata store.\n self.metadata[metadata_set][metadata_role] = metadata_object\n \n # We need to rebuild the key and role databases if \n # metadata object is 'root' or target metadata.\n if metadata_set == 'current':\n if metadata_role == 'root':\n self._rebuild_key_and_role_db()\n elif metadata_object['_type'] == 'Targets':\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def recoverTree(self, root: TreeNode) -> None:\n res, rlist = [], []\n self.helper(root, res, rlist)\n res.sort()\n for i in range(len(res)):\n rlist[i].val = res[i]",
"def test_create_config_roots(self):\n with self.override_role():\n self._create_config_root()",
"def get_final_dirs(self, root=\"\"):\n _updated = int(self.stats()[\"db_update\"])\n _hash = uhash(root)\n return self._get_final_dirs(_updated=_updated, _hash=_hash, root=root)",
"def create_roots(self):\n self.root = SchemaNode.element(\"nmt:netmod-tree\",\n interleave=False, occur=2)\n self.confdata = SchemaNode.element(\"nmt:top\", self.root,\n interleave=True, occur=2)\n self.rpcs = SchemaNode.element(\"nmt:rpc-methods\", self.root,\n interleave=False, occur=2)\n self.notifications = SchemaNode.element(\"nmt:notifications\", self.root,\n interleave=True, occur=2)",
"def fix_root(self):\n # In the main bzrlib code, this forces the new tree to use the same\n # tree root as the old tree. But merge-into explicitly doesn't want\n # that. So the first portion is just a copy of the old code, and then\n # we change the rest.\n try:\n self.tt.final_kind(self.tt.root)\n except NoSuchFile:\n self.tt.cancel_deletion(self.tt.root)\n if self.tt.final_file_id(self.tt.root) is None:\n self.tt.version_file(self.tt.tree_file_id(self.tt.root),\n self.tt.root)\n # All we do is skip the step which used to sanitize the root id.",
"def recoverTree(self, root):\n it = self.isValidBST(root)\n a, b = next(it)\n c = next(it, None)\n if c:\n _, c = c\n a.val, c.val = c.val, a.val\n else:\n a.val, b.val = b.val, a.val\n return root",
"def readdatabase2(self):\n fname=\"/home/alice/rl/v/vme/ADCI/DB/INPUTS.txt\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n for i in lines:\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n return None\n db={}\n db['number']=items[0]\n db['numberDIM']=items[1]\n db['level']=items[2]\n db['name']=items[3]\n db['detector']=items[4]\n db['signature']=items[5]\n dbinputs.append(db)\n return dbinputs",
"def load_eigenstrat_data(file_root):\n\n ind_file=open(file_root+\".ind\", \"r\")\n snp_file=open(file_root+\".snp\", \"r\")\n gen_file=open(file_root+\".geno\", \"r\")\n \n sample_names=ind_file.readlines()\n sample_names=[x.strip() for x in sample_names]\n sample_names=[x.split()[0] for x in sample_names]\n ind_file.close()\n \n snp_data=snp_file.readlines()\n snp_data=[x.strip() for x in snp_data]\n snp_names=[x.split()[0] for x in snp_data]\n snp_pos=[int(x.split()[3]) for x in snp_data]\n snp_file.close()\n\n genotype_data=np.genfromtxt(file_root+\".geno\", dtype=np.int, delimiter=1)\n genotype_data[genotype_data==9]=3\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}",
"def manifestations_root_system(self, manifestations_root_system):\n\n self._manifestations_root_system = manifestations_root_system",
"def get_root_key(name):\n return \"REZ_{name}_ROOT\".format(name=name.upper())"
] |
[
"0.5743582",
"0.56385076",
"0.53721976",
"0.51468635",
"0.5134066",
"0.5078618",
"0.50034493",
"0.49979895",
"0.49948904",
"0.49779555",
"0.4950957",
"0.49308586",
"0.49201196",
"0.48425183",
"0.4825396",
"0.47523007",
"0.473874",
"0.47370243",
"0.47205967",
"0.47127837",
"0.47088298",
"0.4699732",
"0.46968892",
"0.46669844",
"0.4661586",
"0.4653279",
"0.46116745",
"0.46043763",
"0.45977595",
"0.4586267"
] |
0.731258
|
0
|
Import all the roles delegated by 'parent_role'.
|
def _import_delegations(self, parent_role):
  current_parent_metadata = self.metadata['current'][parent_role]

  if 'delegations' not in current_parent_metadata:
    return

  # This could be quite slow with a huge number of delegations.
  keys_info = current_parent_metadata['delegations'].get('keys', {})
  roles_info = current_parent_metadata['delegations'].get('roles', [])

  logger.debug('Adding roles delegated from '+repr(parent_role)+'.')

  # Iterate through the keys of the delegated roles of 'parent_role'
  # and load them.
  for keyid, keyinfo in keys_info.items():
    if keyinfo['keytype'] in ['rsa', 'ed25519']:
      key = tuf.keys.format_metadata_to_key(keyinfo)
      # We specify the keyid to ensure that it's the correct keyid
      # for the key.
      try:
        tuf.keydb.add_key(key, keyid)
      except tuf.KeyAlreadyExistsError:
        pass
      except (tuf.FormatError, tuf.Error), e:
        logger.exception('Failed to add keyid: '+repr(keyid)+'.')
        logger.error('Aborting role delegation for parent role '+parent_role+'.')
        raise
    else:
      logger.warn('Invalid key type for '+repr(keyid)+'.')
      continue

  # Add the roles to the role database.
  for roleinfo in roles_info:
    try:
      # NOTE: tuf.roledb.add_role will take care
      # of the case where rolename is None.
      rolename = roleinfo.get('name')
      logger.debug('Adding delegated role: '+str(rolename)+'.')
      tuf.roledb.add_role(rolename, roleinfo)
    except tuf.RoleAlreadyExistsError, e:
      logger.warn('Role already exists: '+rolename)
    except:
      logger.exception('Failed to add delegated role: '+rolename+'.')
      raise
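
To make the two loops above concrete, here is a hypothetical 'delegations' object of the shape this method expects. The keyid, public key value, role name, and paths are made-up placeholders following the TUF Targets metadata format, not real repository data.

# Hypothetical example of a parent role's 'delegations' field; all values
# below are placeholders.
example_delegations = {
  'keys': {
    'f2d5020d08ae': {                  # made-up, truncated keyid
      'keytype': 'ed25519',
      'keyval': {'public': 'abc123'}   # placeholder public key value
    }
  },
  'roles': [
    {
      'name': 'targets/project',
      'keyids': ['f2d5020d08ae'],
      'threshold': 1,
      'paths': ['project/']
    }
  ]
}

# The first loop in _import_delegations() feeds each entry of 'keys' into
# the key database; the second registers each entry of 'roles' in the role
# database, so later target lookups can verify 'targets/project' metadata.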
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parents(cls):\n return db.relationship(ext.role_model, secondary='role_links',\n primaryjoin=f\"RoleLink.role_id==%s.{ext.role_pk}\" % cls.__name__,\n secondaryjoin=f\"RoleLink.parent_id==%s.{ext.role_pk}\" % cls.__name__,\n backref=\"children\")",
"def test_get_parents_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n role = children[-1]\n parent_list = role_middleware.get_parents(role.id)\n for ro in parent_list:\n print(ro.name, ro.id)",
"def test_get_parent_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n role = children[-1]\n parent = role_middleware.get_parent(role.id)\n print(parent.name, parent.id)",
"def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()",
"def get_roles(role):",
"def iter_roles(self):\n role_ids = self.role_ids\n if (role_ids is not None):\n for role_id in role_ids:\n yield create_partial_role_from_id(role_id)",
"def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)",
"def load_roles(self):\n\n roles_file = open(self.roles_file, mode='r')\n self.assignable_roles = json.load(roles_file)",
"def iter_role_ids(self):\n role_ids = self.role_ids\n if (role_ids is not None):\n yield from role_ids",
"def assign_roles(self):\n # Roling the dice for the starting order\n self.shuffle_agents()\n\n # The role and order assignment to the agents.\n starting_role = None\n for agent_index in range(len(self.agents)):\n agent = self.get_agent_order_mask(agent_index)\n worker = get_worker_from_agent(agent)\n qual = worker.get_granted_qualification(self.role_training_qname)\n assert qual\n role_qual = qual.value\n if role_qual == constants.WIZARD:\n agent.agent_id = 'Wizard'\n elif role_qual == constants.APPRENTICE:\n agent.agent_id = 'Apprentice'\n else:\n raise ValueError(f'Unrecognized role qulification {role_qual}.')\n if not starting_role: # sets it the first time that loop runs\n starting_role = role_qual\n\n logging.info('Agent roles assigned.')\n logging.info(f'Agent with {self.get_agent_order_mask(0).agent_id} role starts.')\n return starting_role",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"def adjust_parent(self, parent_adjustor: ParentRoleAdjuster):\n self.adjust_parent_aggregate(parent_adjustor=parent_adjustor,\n get_summed_field=lambda: getattr(parent_adjustor.child_logic_row.row, self._child_summed_field),\n get_old_summed_field=lambda: getattr(parent_adjustor.child_logic_row.old_row, self._child_summed_field)\n )",
"def get_roles():\r\n global _roles\r\n return _roles",
"def init_roles(self):\n self.role_owner = Role.objects.get_or_create(\n name=PROJECT_ROLE_OWNER, rank=ROLE_RANKING[PROJECT_ROLE_OWNER]\n )[0]\n self.role_delegate = Role.objects.get_or_create(\n name=PROJECT_ROLE_DELEGATE, rank=ROLE_RANKING[PROJECT_ROLE_DELEGATE]\n )[0]\n self.role_contributor = Role.objects.get_or_create(\n name=PROJECT_ROLE_CONTRIBUTOR,\n rank=ROLE_RANKING[PROJECT_ROLE_CONTRIBUTOR],\n )[0]\n self.role_guest = Role.objects.get_or_create(\n name=PROJECT_ROLE_GUEST, rank=ROLE_RANKING[PROJECT_ROLE_GUEST]\n )[0]\n self.role_finder = Role.objects.get_or_create(\n name=PROJECT_ROLE_FINDER,\n rank=ROLE_RANKING[PROJECT_ROLE_FINDER],\n project_types=[PROJECT_TYPE_CATEGORY],\n )[0]",
"def set_roles_loader(self, role_loader):\r\n self.load_roles = role_loader",
"def import_descendants(parent_module, target_globals, target_locals):\n basedir = os.path.dirname(parent_module.__file__)\n\n for root_dir, dirs, files in os.walk(basedir):\n relative_dir = root_dir[len(basedir):]\n package = parent_module.__package__ + relative_dir.replace(os.path.sep, '.')\n components = [os.path.splitext(filename) for filename in files]\n modules = [basename for basename, ext in components\n if ext == '.py' and basename != '__init__']\n\n # Import the directory module, unless it is src_module itself (this\n # function is commonly used to import the descendants of a module into\n # itself, so if we didn't have this guard then we'd try to import the\n # parent module into itself)\n if root_dir != basedir:\n exec 'from %s import *' % (package,) in target_globals, target_locals\n\n for module in modules:\n exec 'from %s.%s import *' % (package, module) in target_globals, target_locals",
"def commonWorkflow(context):\n setup = getToolByName(context, 'portal_setup')\n setup.runAllImportStepsFromProfile(PROFILE)\n portal_workflow = getToolByName(context, 'portal_workflow')\n portal_workflow.updateRoleMappings()",
"def read_roles():\n roles = defaultdict(list)\n invert_is_a = defaultdict(list)\n with open('relation.tsv','r') as inf:\n for line in inf:\n x = line.strip().split('\\t')\n if x[1] == 'has_role':\n roles[f'CHEBI:{x[3]}'].append(f'CHEBI:{x[2]}')\n elif x[1] == 'is_a':\n child = f'CHEBI:{x[3]}'\n parent = f'CHEBI:{x[2]}'\n invert_is_a[parent].append(child)\n #Now include parents\n ancestors = get_ancestors(invert_is_a)\n for node,noderoles in roles.items():\n if node == 'CHEBI:64663':\n print('hi')\n restroles= []\n for role in noderoles:\n moreroles=ancestors[role]\n restroles += moreroles\n roles[node] += restroles\n return roles",
"def on_update(self):\n\t\tusers = frappe.get_all('User', filters={'role_profile_name': self.name})\n\t\troles = [role.role for role in self.roles]\n\t\tfor d in users:\n\t\t\tuser = frappe.get_doc('User', d)\n\t\t\tuser.set('roles', [])\n\t\t\tuser.add_roles(*roles)",
"async def roles(self, ctx):\n\n pass",
"def getRoles(self):",
"def enaml_importer():\n print(imports, dir(imports))\n old = imports.get_importers()\n\n yield imports\n\n imports._imports__importers = old",
"def load_roles():\n for code, name in list(config.Roles.ALL_ROLES.items()):\n role = ContactRole.objects.get_or_create(code=code)[0]\n if role.name != name:\n role.name = name\n role.save()",
"def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)",
"def insert_roles_staging(self):\n\n self.load_wikidata(\"roles\", ROLES_SPARQL_QUERY, INSERT_ROLE_SQL_QUERY,\n INSERT_ROLE_MAP_COLUMNS)",
"def get_roles(self, principal_id):",
"def recalculate_roles(worker):\n for gspd in worker.source.administrator_page.participant_group.groupspecificparticipantdata_set.all():\n gspd.recalculate_roles()\n worker.unilog(\"All roles are recalculated, to update the leaderboard run /recreate_leaderboard command.\")",
"def test_list_role_for_all_namespaces(self):\n pass",
"def test_create_role_for_all_namespaces(self):\n pass",
"def inheritRole(self, role=None, roleName=None, kvDict=None):\n\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role', inherit=True)"
] |
[
"0.5542862",
"0.5484064",
"0.53176576",
"0.52519345",
"0.5243283",
"0.5170428",
"0.5165677",
"0.51483893",
"0.5015461",
"0.50147766",
"0.5009216",
"0.49793363",
"0.49722782",
"0.49593183",
"0.4957792",
"0.49368262",
"0.49309275",
"0.48666155",
"0.4833351",
"0.48196456",
"0.47783944",
"0.47687778",
"0.47423893",
"0.46861738",
"0.4677405",
"0.46765593",
"0.46666387",
"0.4644406",
"0.46165907",
"0.4611101"
] |
0.73602885
|
0
|
Update the latest copies of the metadata for the top-level roles. The update request process follows a specific order to ensure the metadata files are securely updated. The client should call refresh() before requesting target file information. Calling refresh() ensures that target methods, such as all_targets() and target(), refer to the latest available content. The latest copies of delegated metadata are downloaded and updated by the target methods. Returns None.
|
def refresh(self):

  # The timestamp role does not have signed metadata about it; otherwise we
  # would need an infinite regress of metadata. Therefore, we use some
  # default, sane metadata about it.
  DEFAULT_TIMESTAMP_FILEINFO = {
    'hashes':None,
    'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH
  }

  # Update the top-level metadata. The _update_metadata_if_changed() and
  # _update_metadata() calls below do NOT perform an update if there
  # is insufficient trusted signatures for the specified metadata.
  # Raise 'tuf.NoWorkingMirrorError' if an update fails.

  # Use default but sane information for timestamp metadata, and do not
  # require strict checks on its required length.
  self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)

  self._update_metadata_if_changed('release', referenced_metadata='timestamp')

  self._update_metadata_if_changed('root')

  self._update_metadata_if_changed('targets')

  # Updated the top-level metadata (which all had valid signatures), however,
  # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata
  # has expired.
  for metadata_role in ['timestamp', 'root', 'release', 'targets']:
    self._ensure_not_expired(metadata_role)
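
For context, this is roughly how a client drives refresh(), assuming the legacy tuf.client.updater.Updater interface that these methods belong to. The mirror configuration values and destination directory below are placeholders, not part of the original text, and a real client would also configure tuf.conf (e.g. the local repository/metadata directory) before constructing the updater.

# Sketch of typical client usage (legacy TUF client API); URLs and paths
# are placeholder values.
import tuf.client.updater

repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
                                  'metadata_path': 'metadata',
                                  'targets_path': 'targets',
                                  'confined_target_dirs': ['']}}

updater = tuf.client.updater.Updater('updater', repository_mirrors)

# Securely update the top-level metadata before asking for any targets.
updater.refresh()

# Target methods such as all_targets() now refer to the latest content and
# will pull in delegated metadata as needed.
targets = updater.all_targets()
updated_targets = updater.updated_targets(targets, '/tmp/targets')
for target in updated_targets:
  updater.download_target(target, '/tmp/targets')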
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def _rebuild_key_and_role_db(self):\n \n # Clobbering this means all delegated metadata files are rendered outdated\n # and will need to be reloaded. However, reloading the delegated metadata\n # files is avoided here because fetching target information with methods\n # like all_targets() and target() always cause a refresh of these files.\n # The metadata files for delegated roles are also not loaded when the\n # repository is first instantiated. Due to this setup, reloading delegated\n # roles is not required here.\n tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])\n tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])",
"def refresh(self):\n self.update_from_file()\n self.update_from_env()",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def update(self):\n self.downloader.authorize()\n self.update_users()\n self.update_channels()\n self.update_history()",
"def update_metadata(self, metadata):\n return self.parent.update_metadata_for_node(self, metadata)",
"def do_update(self, node_role_map, node_roles, first_run=False):\n require('use_rds')\n require('pstat_instance')\n require('pstat_url')\n require('project_root')\n require('config_folder')\n require('ssl_prefix')\n require('backup')\n require('aws_access_key_id')\n require('aws_secret_access_key')\n require('sphinx_counter')\n require('key_filename')\n require('calabar_conf_context')\n require('loggly_inputs')\n require('sphinx_counter')\n require('ipsec_confs')\n require('hostname')\n require('enable_periodic_tasks')\n\n logger.info(\"Starting to provision %s\", env.host_string)\n\n for ipsec_name, _ in env.ipsec_confs.items():\n # Require all of the pre-shared key configs\n require('ipsec_psk_%s' % ipsec_name)\n\n if first_run:\n self.do_first_launch_config()\n\n self._stop_celery()\n\n self._update_cache_settings(node_role_map['memcached']['all'])\n self._update_sphinx_settings(\n node_role_map['celery_backend']['same_az'],\n node_roles,\n )\n self._update_celery_backend_settings(\n node_role_map['sphinx_search_indexer']['same_az'],\n )\n ldap_api_nodes = node_role_map['has_ldap_access']\n self._update_ldap_api_endpoint_settings(\n all_ldap_api_nodes=ldap_api_nodes['all'],\n same_az_ldap_api_nodes=ldap_api_nodes['same_az'],\n node_roles=node_roles,\n )\n self._update_celery_ldap_settings(node_roles)\n\n # Package and push the app to the new instance\n env.project_root_src = '/opt/pstat/versions/%(timestamp)s' % env\n source_dir = env.project_root_src\n current_source_dir = None\n if not first_run:\n current_source_dir = env.project_root\n with hide(*fab_output_hides):\n push_source(\n new_source_dir=source_dir,\n current_source_dir=current_source_dir,\n chown=F_CHOWN,\n chmod=\"u+rw,g+rw,o-rw\",\n )\n self._make_media_readable(source_dir)\n self._configure_settings_local(\n source_dir,\n env.pstat_settings,\n chown=F_CHOWN,\n )\n self._configure_settings_target(\n source_dir,\n env.settings_target,\n chown=F_CHOWN,\n )\n self.configure_terrarium(source_dir=source_dir, user=FILE_OWNER)\n self._activate_new_source(\n source_dir,\n [ACTIVE_SOURCE_SYMLINK, env.project_root],\n )\n self._run_db_migrations(user=FILE_OWNER)\n\n # Link up the attachments and upload directories from /mnt/\n self._link_storage_dirs()\n\n self._configure_webservers(node_roles)\n building_search_index = self._build_search_index()\n\n self._create_media_folder()\n self._collect_static_media()\n\n self._create_500_page()\n self._restart_webservers()\n\n # Services managed via supervisord\n self._configure_celery(node_roles)\n self._update_supervisord()\n self._configure_calabar()\n self._configure_ipsec()\n self._start_celery()\n\n self._configure_loggly()\n self._configure_pstat_cron_jobs()\n self._configure_email_sending()\n\n if first_run:\n self._sync_s3_media()\n\n if building_search_index:\n self._wait_for_search_indexing()\n self._ensure_sphinx_running()\n self._configure_sphinx_cron()\n\n logger.info(\"Provisioner completed successfully\")",
"def metadata_update(self, _):\n self.details.original_widget = YesNoWidget('Update metadata files?', self.__metadata_update)",
"def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")",
"def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license",
"def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()",
"def refresh(self):\r\n self.metadata = self.db.read(self.path).json()",
"def update_all_metadata(self, metadata):\n return self.manager.update_all_metadata(self, metadata)",
"def refresh(self):\n if self.is_server_process and self.cache_manager.is_refreshing():\n raise RefreshInProgressError()\n catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()\n for catalog in catalogs:\n self._insert_request(self.refresh_queue, catalog, \"modify\")",
"def __init__(self, updater_name, repository_mirrors):\n \n # Do the arguments have the correct format?\n # These checks ensure the arguments have the appropriate\n # number of objects and object types and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if there is a mistmatch.\n tuf.formats.NAME_SCHEMA.check_match(updater_name)\n tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)\n \n # Save the validated arguments.\n self.name = updater_name\n self.mirrors = repository_mirrors\n\n # Store the trusted metadata read from disk.\n self.metadata = {}\n \n # Store the currently trusted/verified metadata.\n self.metadata['current'] = {} \n \n # Store the previously trusted/verified metadata.\n self.metadata['previous'] = {}\n\n # Store the file information of all the metadata files. The dict keys are\n # paths, the dict values fileinfo data. This information can help determine\n # whether a metadata file has changed and so needs to be re-downloaded.\n self.fileinfo = {}\n \n # Store the location of the client's metadata directory.\n self.metadata_directory = {}\n \n # Ensure the repository metadata directory has been set.\n if tuf.conf.repository_directory is None:\n message = 'The TUF update client module must specify the directory' \\\n ' containing the local repository files.' \\\n ' \"tuf.conf.repository_directory\" MUST be set.'\n raise tuf.RepositoryError(message)\n\n # Set the path for the current set of metadata files. \n repository_directory = tuf.conf.repository_directory\n current_path = os.path.join(repository_directory, 'metadata', 'current')\n \n # Ensure the current path is valid/exists before saving it.\n if not os.path.exists(current_path):\n message = 'Missing '+repr(current_path)+'. This path must exist and, ' \\\n 'at a minimum, contain the root metadata file.' \n raise tuf.RepositoryError(message)\n self.metadata_directory['current'] = current_path\n \n # Set the path for the previous set of metadata files. \n previous_path = os.path.join(repository_directory, 'metadata', 'previous') \n \n # Ensure the previous path is valid/exists.\n if not os.path.exists(previous_path):\n message = 'Missing '+repr(previous_path)+'. This path must exist.'\n raise tuf.RepositoryError(message)\n self.metadata_directory['previous'] = previous_path\n \n # Load current and previous metadata.\n for metadata_set in ['current', 'previous']:\n for metadata_role in ['root', 'targets', 'release', 'timestamp']:\n self._load_metadata_from_file(metadata_set, metadata_role)\n \n # Raise an exception if the repository is missing the required 'root'\n # metadata.\n if 'root' not in self.metadata['current']:\n message = 'No root of trust! Could not find the \"root.txt\" file.'\n raise tuf.RepositoryError(message)",
"def refresh_metadata(self):\n #self.node_index = None\n #self.edge_index = None\n #self._calc_edge_centers = False\n #self._calc_cell_centers = False\n #self._calc_vcenters = False\n self._node_to_edges = None\n self._node_to_cells = None",
"def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()",
"def fusion_api_fabric_manager_refresh(self, body, uri, api=None, headers=None):\n param = '/snapshot/'\n return self.fabricmanager.put(body=body, uri=uri, param=param, api=api, headers=headers)",
"def update_role_files(**kwargs):\n\n # Finds out which tracking branch you are on\n # Generates a commit in OA and each of its roles\n # Generates a git show output\n # Asks before triggering git review\n\n # Example commit message\n # Update all SHAs for 15.1.8\n # This patch updates all the roles to the latest available stable\n # SHA's, copies the release notes from the updated roles into the\n # integrated repo, updates all the OpenStack Service SHA's, and\n # updates the appropriate python requirements pins.\n click.echo(\"Not implemented yet\")",
"def update_metadata(self):\n parser = GenericParser(\n fn_re='{}/(e\\d+s\\d+)_.*/Production.nc'.format(self.data_folder),\n group_names=['sim'],\n group_transforms=[lambda x: x],\n top_fn='',\n step_ps=self.timestep\n )\n meta = gather_metadata('{}/e*/*nc'.format(self.data_folder), parser)\n meta['top_fn'] = sorted(glob('{}/e*/structure.prmtop'.format(self.input_folder)))\n self.meta = meta",
"def run(self):\n self.update_repos()",
"def refresh(self):\n self._policies = self._get_policies()",
"def reload(self):\n self._populate(self.hierarchy[-1])",
"def refresh_all(self) -> None:\n self._update_thread.force_refresh_folder(self.feed_cache)",
"def metadata_update(self, new_metadata=None):\n if new_metadata is None:\n self.metadata_set(self.t.metadata())",
"def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()",
"def update_metadata(self, metadata):\n return self.manager.update_metadata(self, metadata)",
"def update_target(self):\n pass",
"def refresh(self):\n self.active_member_count\n self.description\n self.lbmethod\n self.members\n self.minimum_active_member\n self.minimum_up_member\n self.slow_ramp_time\n self.statistics"
] |
[
"0.70679146",
"0.5714709",
"0.5503326",
"0.54583365",
"0.54566574",
"0.54549015",
"0.5347425",
"0.52819204",
"0.52689433",
"0.5257534",
"0.5254578",
"0.52315694",
"0.52265584",
"0.5155415",
"0.5107273",
"0.5079031",
"0.5068169",
"0.5066787",
"0.5056151",
"0.5014376",
"0.4908333",
"0.48953608",
"0.48902977",
"0.48902363",
"0.4885898",
"0.48813757",
"0.48757923",
"0.4843387",
"0.4835641",
"0.4812802"
] |
0.73300976
|
0
|
A helper function that verifies multiple secure hashes of the downloaded file. If any of these fail, it raises an exception. This is to conform to the TUF specification, which supports clients with different hashing algorithms. The 'hash.py' module is used to compute the hashes of the 'file_object'.
|
def __check_hashes(self, file_object, trusted_hashes):
# Verify each trusted hash of 'trusted_hashes'. Raise exception if
# any of the hashes are incorrect and return if all are correct.
for algorithm, trusted_hash in trusted_hashes.items():
digest_object = tuf.hash.digest(algorithm)
digest_object.update(file_object.read())
computed_hash = digest_object.hexdigest()
if trusted_hash != computed_hash:
raise tuf.BadHashError(trusted_hash, computed_hash)
else:
logger.info('The file\'s '+algorithm+' hash is correct: '+trusted_hash)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def CheckHashes(self, hashes):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha256\"):\n # The canonical name of the file is where we store the file hash.\n digest = hsh.sha256\n hash_map[aff4.ROOT_URN.Add(\"files/hash/generic/sha256\").Add(\n str(digest))] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def _verify_hashes(hashes):\n\n for item in hashes:\n try:\n hashlib.new(item)\n VALID_HASH.append(item)\n except Exception:\n pass",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n if ((algorithm is 'sha256') or\n (algorithm is 'auto' and len(file_hash) is 64)):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def check_hash(self, fname, args):\n fobj = self._open_file(fname)\n\n rc = 0\n format_errors = 0\n hash_errors = 0\n read_errors = 0\n for idx, line in enumerate(fobj):\n # remove any newline characters\n m = self.CHECK_RE.match(line.strip())\n if not m:\n if args.warn:\n self.app.stderr.write(\n 'hasher {0}: {1}: {2}: improperly formatted {3}'\n ' checksum line\\n'.format(self.name, fname, idx + 1,\n self.name.upper()))\n format_errors += 1\n rc = 1\n continue\n hash_value, binary, check_file = m.groups()\n\n try:\n check_f = open(check_file, 'rb' if binary == '*' else 'r')\n except IOError:\n self.app.stderr.write(\n 'hasher {0}: {1}: No such file or directory\\n'.format(\n self.name, check_file))\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, READ_ERROR))\n read_errors += 1\n rc = 1\n continue\n\n if self._calculate_hash(check_f) == hash_value:\n if not (args.quiet or args.status):\n self.app.stdout.write(\n STATUS_MSG.format(check_file, SUCCESS))\n else:\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, HASH_ERROR))\n hash_errors += 1\n rc = 1\n\n if format_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} line{2} {3} improperly'\n ' formatted\\n'.format(\n self.name,\n format_errors,\n 's' if format_errors > 1 else '',\n 'are' if format_errors > 1 else 'is',\n ))\n if read_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} listed file{2}'\n ' could not be read\\n'.format(\n self.name,\n read_errors,\n 's' if read_errors > 1 else '',\n ))\n if hash_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} computed checksum{2}'\n ' did NOT match\\n'.format(\n self.name,\n hash_errors,\n 's' if hash_errors > 1 else '',\n ))\n return rc",
"def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error",
"def hash_file(self, filename_or_bytestream):\n\n try:\n for data in self._read_blocks(filename_or_bytestream):\n self._update(data)\n except OSError as e:\n print('digest: ', filename_or_bytestream, ': ', e.strerror, sep='', file=sys.stderr)\n return None\n return self._hexdigests()",
"def _HashFile(self, fd):\n hashes = fd.Get(fd.Schema.HASH)\n if hashes:\n found_all = True\n for fingerprint_type, hash_types in self.HASH_TYPES.iteritems():\n for hash_type in hash_types:\n if fingerprint_type == \"pecoff\":\n hash_type = \"pecoff_%s\" % hash_type\n if not hashes.HasField(hash_type):\n found_all = False\n break\n if not found_all:\n break\n if found_all:\n return hashes\n\n fingerprinter = fingerprint.Fingerprinter(fd)\n if \"generic\" in self.HASH_TYPES:\n hashers = self._GetHashers(self.HASH_TYPES[\"generic\"])\n fingerprinter.EvalGeneric(hashers=hashers)\n if \"pecoff\" in self.HASH_TYPES:\n hashers = self._GetHashers(self.HASH_TYPES[\"pecoff\"])\n if hashers:\n fingerprinter.EvalPecoff(hashers=hashers)\n\n if not hashes:\n hashes = fd.Schema.HASH()\n\n for result in fingerprinter.HashIt():\n fingerprint_type = result[\"name\"]\n for hash_type in self.HASH_TYPES[fingerprint_type]:\n if hash_type not in result:\n continue\n\n if hash_type == \"SignedData\":\n # There can be several certs in the same file.\n for signed_data in result[hash_type]:\n hashes.signed_data.Append(revision=signed_data[0],\n cert_type=signed_data[1],\n certificate=signed_data[2])\n continue\n\n # Set the hashes in the original object\n if fingerprint_type == \"generic\":\n hashes.Set(hash_type, result[hash_type])\n\n elif fingerprint_type == \"pecoff\":\n hashes.Set(\"pecoff_%s\" % hash_type, result[hash_type])\n\n else:\n logging.error(\"Unknown fingerprint_type %s.\", fingerprint_type)\n\n try:\n fd.Set(hashes)\n except IOError:\n pass\n return hashes",
"def _calculate_hash(self, file_object):\n hasher = self.hashlib()\n for chunk in self.iterchunks(file_object):\n hasher.update(chunk)\n return hasher.hexdigest()",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n hasher = _resolve_hasher(algorithm, file_hash)\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def test_hash(self):\n ffs = get_available_force_fields()\n\n for ff1, ff2 in itertools.combinations(ffs, 2):\n assert hash(ff1) != hash(ff2)",
"def GetFileHashes(file_path, do_sha1=False, do_sha256=False, do_md5=False):\n hashes = {}\n if (do_sha1 or do_sha256 or do_md5):\n # Initialize hashers.\n hasher_sha1 = hashlib.sha1() if do_sha1 else None\n hasher_sha256 = hashlib.sha256() if do_sha256 else None\n hasher_md5 = hashlib.md5() if do_md5 else None\n\n # Read blocks from file, update hashes.\n with open(file_path, 'rb') as fd:\n while True:\n block = fd.read(_HASH_BLOCK_SIZE)\n if not block:\n break\n hasher_sha1 and hasher_sha1.update(block)\n hasher_sha256 and hasher_sha256.update(block)\n hasher_md5 and hasher_md5.update(block)\n\n # Update return values.\n if hasher_sha1:\n hashes['sha1'] = hasher_sha1.digest()\n if hasher_sha256:\n hashes['sha256'] = hasher_sha256.digest()\n if hasher_md5:\n hashes['md5'] = hasher_md5.digest()\n\n return hashes",
"def semhash(file):\n _hash_helper(file)",
"def semhash(file):\n _hash_helper(file)",
"def _verify_hash(self, read_bytes):\n if self.hash is None:\n raise QuiltException(\"Hash missing - need to build the package\")\n _check_hash_type_support(self.hash.get('type'))\n digest = hashlib.sha256(read_bytes).hexdigest()\n if digest != self.hash.get('value'):\n raise QuiltException(\"Hash validation failed\")",
"def hash_file_sha256(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha256, binary=binary, buffer_size=buffer_size)",
"def verify(self, src, extra_files_ok=False):\n for lk, e in self.walk():\n _check_hash_type_support(e.hash[\"type\"])\n\n src = PhysicalKey.from_url(fix_url(src))\n src_dict = dict(list_url(src))\n url_list = []\n size_list = []\n for logical_key, entry in self.walk():\n src_size = src_dict.pop(logical_key, None)\n if src_size is None:\n return False\n if entry.size != src_size:\n return False\n entry_url = src.join(logical_key)\n url_list.append(entry_url)\n size_list.append(src_size)\n\n if src_dict and not extra_files_ok:\n return False\n\n hash_list = calculate_sha256(url_list, size_list)\n for (logical_key, entry), url_hash in zip(self.walk(), hash_list):\n if isinstance(url_hash, Exception):\n raise url_hash\n if entry.hash['value'] != url_hash:\n return False\n\n return True",
"def CheckHashes(self, hashes, unused_external=True):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha1\"):\n digest = hsh.sha1\n hash_urn = self.PATH.Add(str(digest))\n logging.info(\"Checking URN %s\", str(hash_urn))\n hash_map[hash_urn] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def verify(self, verbose_failures=False):\n modified = set()\n removed = set()\n count = len(self.entries)\n # TODO: Track number of bytes hashed instead of number of files\n # This will act as a more meaningful progress indicator\n i = 0\n for i, entry in enumerate(self.entries.values(), 1):\n if entry.exists():\n if entry.verify():\n entry.update_attrs()\n else:\n if verbose_failures:\n stderr.write('\\r{} failed hash verification\\n'.format(entry.filename))\n modified.add(entry.filename)\n else:\n removed.add(entry.filename)\n if verbose_failures:\n stderr.write('\\r{} is missing\\n'.format(entry.filename))\n stderr.write('\\rChecked {} of {} files'.format(i, count))\n if i:\n stderr.write('\\n')\n return modified, removed",
"def verify(\n hash_file: typing.Optional[str],\n data_folders: str,\n no_paths_flag: bool,\n hash_flag: bool,\n cache_parent_folder: str,\n identifiers: typing.Optional[typing.List[str]] = None,\n file_filters: typing.Optional[typing.List[str]] = None,\n quiet: bool = False,\n) -> bool:\n if quiet:\n log = logging.getLogger(\"quiet\")\n else:\n log = logging.getLogger(__name__)\n if hash_file is not None and not os.path.isfile(hash_file):\n log.error(\"File '{}' does not exist\".format(hash_file))\n return False\n for data_folder in data_folders:\n if not os.path.isdir(data_folder):\n log.error(\"Folder '{}' does not exist\".format(data_folder))\n return False\n\n errors = 0\n for data_folder in data_folders:\n # Get comparable dictionaries from both the hash metadata file (i.e. IA-side metadata)\n # and local folder of files (i.e. local-side metadata of previously-downloaded files)\n missing_metadata_items = []\n if hash_file is not None:\n try:\n hashfile_metadata = get_metadata_from_hashfile(\n hash_file, hash_flag, identifiers, file_filters\n )\n except ValueError:\n log.error(\n \"Hash file '{}' does not match expected format - cannot be used for\"\n \" verification\".format(hash_file)\n )\n return False\n else:\n subfolders = [\n item\n for item in os.listdir(data_folder)\n if os.path.isdir(os.path.join(data_folder, item))\n ]\n hashfile_metadata = {}\n if len(subfolders) == 0:\n log.warning(\n \"No item folders were found in provided data folder '{}' -\"\n \" make sure the parent download folder was provided rather than the\"\n \" item subfolder (e.g. provide '/downloads/' rather than\"\n \" '/downloads/item/'\".format(data_folder)\n )\n for subfolder in subfolders:\n if identifiers is not None:\n if subfolder not in identifiers:\n continue\n # Find cache data for the subfolder (item) in question\n cache_folder = os.path.join(cache_parent_folder, subfolder)\n if os.path.isdir(cache_folder):\n # Get most recent cache file in folder\n cache_files = sorted(\n [\n f.path\n for f in os.scandir(cache_folder)\n if f.is_file() and f.name.endswith(\"metadata.txt\")\n ]\n )\n if len(cache_files) > 0:\n cache_file = cache_files[-1]\n try:\n hashfile_metadata.update(\n get_metadata_from_hashfile(\n cache_file, hash_flag, identifiers, file_filters\n )\n )\n except ValueError:\n log.warning(\n \"Cache file '{}' does not match expected format - cannot be used\"\n \" for verification\".format(cache_file)\n )\n missing_metadata_items.append(subfolder)\n else:\n log.warning(\n \"Cache data not found for subfolder/item '{}' - files for this item\"\n \" will not be checked\".format(subfolder)\n )\n missing_metadata_items.append(subfolder)\n else:\n log.warning(\n \"Cache data not found for subfolder/item '{}' - files for this item will\"\n \" not be checked\".format(subfolder)\n )\n missing_metadata_items.append(subfolder)\n\n if len(hashfile_metadata) == 0:\n log.error(\n \"Hash file '{}' is empty - check correct file has been provided\".format(hash_file)\n if hash_file is not None\n else \"No metadata found in cache - verification cannot be performed\"\n )\n errors += 1\n continue\n\n relative_paths_from_ia_metadata = list(hashfile_metadata.keys())\n\n if hash_flag:\n md5_or_size_str = \"MD5\"\n else:\n md5_or_size_str = \"Size\"\n\n if identifiers is None:\n log.info(\n \"Verification of {} metadata for files in folder '{}' begun{}\".format(\n md5_or_size_str,\n data_folder,\n \" (using hash file '{}')\".format(hash_file) if hash_file is not None else \"\",\n )\n )\n else:\n log.info(\n 
\"Verification of {} metadata for item(s) {} files in folder '{}' begun\".format(\n md5_or_size_str,\n \", \".join([\"'{}'\".format(identifier) for identifier in identifiers]),\n data_folder,\n )\n )\n\n mismatch_count = 0\n if no_paths_flag:\n folder_metadata = get_metadata_from_files_in_folder(data_folder, hash_flag)\n else:\n unique_identifier_dirs_from_ia_metadata = sorted(\n list(\n set(\n [\n pathlib.Path(relative_path).parts[0]\n for relative_path in relative_paths_from_ia_metadata\n ]\n )\n )\n )\n # Print warnings for item folders referenced in IA metadata that aren't found in\n # the provided data folder\n nonexistent_dirs = []\n for identifier_dir in unique_identifier_dirs_from_ia_metadata:\n if not os.path.isdir(os.path.join(data_folder, identifier_dir)):\n log.warning(\n \"Expected item folder '{}' was not found in provided data folder '{}' -\"\n \" make sure the parent download folder was provided rather than the\"\n \" item subfolder (e.g. provide '/downloads/' rather than\"\n \" '/downloads/item/'\".format(identifier_dir, data_folder)\n )\n nonexistent_dirs.append(identifier_dir)\n\n folder_metadata = get_metadata_from_files_in_folder(\n data_folder, hash_flag, relative_paths_from_ia_metadata\n )\n\n # Group warnings for each file in a non-existent folder into one unified warning\n for nonexistent_dir in nonexistent_dirs:\n nonexistent_files = [\n relative_path\n for relative_path in relative_paths_from_ia_metadata\n if pathlib.Path(relative_path).parts[0] == nonexistent_dir\n ]\n log.warning(\n \"Files in non-existent folder '{}' not found: {}\".format(\n nonexistent_dir,\n \", \".join(\n [\n \"'{}'\".format(nonexistent_file)\n for nonexistent_file in nonexistent_files\n ]\n ),\n )\n )\n mismatch_count += len(nonexistent_files)\n # Delete non-existent files from the hashfile_metadata so we don't end up\n # iterating these later and printing more warning messages than necessary\n for nonexistent_file in nonexistent_files:\n if nonexistent_file in hashfile_metadata:\n del hashfile_metadata[nonexistent_file]\n\n # Don't consider the [identifier]_files.xml files, as these regularly gives false\n # positives (see README Known Issues)\n xml_files_to_be_removed = [\n relative_path\n for relative_path in relative_paths_from_ia_metadata\n if os.path.basename(relative_path)\n == \"{}_files.xml\".format(pathlib.Path(relative_path).parts[0])\n ]\n for xml_file_to_be_removed in xml_files_to_be_removed:\n if xml_file_to_be_removed in hashfile_metadata:\n del hashfile_metadata[xml_file_to_be_removed]\n\n # If user has moved files, so they're no longer in the same relative file paths, they\n # will need to set the 'nopaths' flag so that only hash/size metadata is checked rather\n # than path data as well\n # Disadvantage of this approach is that, if a file is stored in multiple locations, the\n # unique hash/size will only be checked for once - so any deletions of multiple copies\n # of the file will not be flagged\n if no_paths_flag:\n # Iterate only for hashes/sizes in the IA metadata that are not present in the local\n # folder of downloaded files\n for value in [\n value\n for value in hashfile_metadata.values()\n if value not in folder_metadata.values()\n ]:\n log.warning(\n \"{} '{}' (original filename(s) '{}') not found in data folder\".format(\n md5_or_size_str,\n value,\n [k for k, v in hashfile_metadata.items() if v == value],\n )\n )\n mismatch_count += 1\n\n else:\n for file_path, value in hashfile_metadata.items():\n if file_path not in folder_metadata:\n log.warning(\n 
\"File '{}' not found in data folder '{}'\".format(file_path, data_folder)\n )\n mismatch_count += 1\n else:\n if value != folder_metadata[file_path]:\n if value != \"-1\":\n log.warning(\n \"File '{}' {} does not match ('{}' in IA metadata, '{}' in data\"\n \" folder)\".format(\n file_path,\n md5_or_size_str,\n value,\n folder_metadata[file_path],\n )\n )\n mismatch_count += 1\n else:\n log.debug(\n \"File '{}' {} is not available in IA metadata, so verification\"\n \" not performed on this file\".format(file_path, md5_or_size_str)\n )\n\n issue_message = \"\"\n if len(missing_metadata_items) > 0:\n issue_message += \"cached metadata missing for items {}; \".format(\n \", \".join([\"'{}'\".format(item) for item in missing_metadata_items])\n )\n if mismatch_count > 0:\n issue_message += (\n \"{} files were not present or did not match Internet Archive {} metadata; \".format(\n mismatch_count, md5_or_size_str\n )\n )\n if issue_message == \"\":\n issue_message = (\n \"all files were verified against Internet Archive {} data with no issues identified\"\n .format(md5_or_size_str)\n )\n else:\n issue_message = issue_message[:-2]\n if identifiers is None:\n log.info(\"Verification of folder '{}' complete: {}\".format(data_folder, issue_message))\n else:\n log.info(\n \"Verification of item(s) {} in folder '{}' complete: {}\".format(\n \", \".join([\"'{}'\".format(identifier) for identifier in identifiers]),\n data_folder,\n issue_message,\n )\n )\n errors += len(missing_metadata_items) + mismatch_count\n if errors > 0:\n return False\n return True",
"def getHashFile(file):\n try:\n fileContent = open(file, 'rb').read()\n except:\n raise IOError, \"No such file...\"\n return False\n return getHash(fileContent)",
"def file_hash(load, fnd):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = {}\n\n if \"saltenv\" not in load:\n return ret\n\n if \"path\" not in fnd or \"bucket\" not in fnd or not fnd[\"path\"]:\n return ret\n\n cached_file_path = _get_cached_file_name(\n fnd[\"bucket\"], load[\"saltenv\"], fnd[\"path\"]\n )\n\n if os.path.isfile(cached_file_path):\n ret[\"hsum\"] = salt.utils.hashutils.get_hash(cached_file_path)\n ret[\"hash_type\"] = \"md5\"\n\n return ret",
"def hash_multiple_files(self, inputs):\n\n yield self.header\n for filename_or_bytestream in inputs: # Calculate digest(s) for each file\n digests = self.hash_file(filename_or_bytestream)\n if digests is not None:\n yield (filename_or_bytestream, *digests)",
"def hash_file(file_to_hash):\n print(\"Hashing \" + file_to_hash + \"...\")\n hash_algorithm = hashlib.sha256()\n file = open(file_to_hash, 'rb')\n while True:\n contents = file.read(65536)\n if not contents:\n break\n hash_algorithm.update(contents)\n hash_str = hash_algorithm.hexdigest()\n return hash_str",
"def test_hash_data(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n File(self.file).write(\"\\n\".join(self.data_to_write))\n expected = True\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)\n\n for algo, result in self.expected_hashed.items():\n self.assertEqual(\n result,\n Hash(self.file).hash_data(algo),\n msg=\"%s did not passed the test\" % repr(algo),\n )\n\n File(self.file).delete()\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)",
"def main(hashmes, s, v, c):\n\n # Print version and quit\n if v:\n import pkg_resources\n version = pkg_resources.require(\"omnihash\")[0].version\n click.echo(version)\n return\n\n for hashme in hashmes:\n # URL\n if not s and validators.url(hashme):\n click.echo(\"Hashing content of URL '%s'..\" % hashme)\n try:\n response = requests.get(hashme)\n except requests.exceptions.ConnectionError as e:\n print (\"Not a valid URL. :(\")\n continue\n except Exception as e:\n print (\"Not a valid URL. %s\" % e)\n continue \n if response.status_code != 200:\n click.echo(\"Response returned %s. :(\" % response.status_code)\n continue\n hashme_data = response.content\n # File\n elif os.path.exists(hashme) and not s:\n click.echo(\"Hashing file %s..\" % hashme)\n with open(hashme, mode='rb') as f:\n hashme_data = f.read()\n if sys.version_info < (3,0):\n hashme_data = hashme_data.encode('utf-8')\n # String\n else:\n click.echo(\"Hashing string '%s'..\" % hashme)\n hashme_data = hashme.encode('utf-8')\n\n # Default Algos\n done = []\n for algo in sorted(hashlib.algorithms_available):\n\n # algorithms_available can have duplicates\n if algo.upper() in done:\n continue\n \n h = hashlib.new(algo)\n h.update(hashme_data)\n echo(algo, h.hexdigest())\n done.append(algo)\n\n # SHA3 Family\n sha = sha3.SHA3224()\n sha.update(hashme_data)\n echo('SHA3_224', sha.hexdigest().decode(\"utf-8\"))\n\n sha = sha3.SHA3256()\n sha.update(hashme_data)\n echo('SHA3_256', sha.hexdigest().decode(\"utf-8\"))\n\n sha = sha3.SHA3384()\n sha.update(hashme_data)\n echo('SHA3_384', sha.hexdigest().decode(\"utf-8\"))\n\n sha = sha3.SHA3512()\n sha.update(hashme_data)\n echo('SHA3_512', sha.hexdigest().decode(\"utf-8\"))\n\n # BLAKE\n blake = blake2s()\n blake.update(hashme_data)\n echo('BLAKE2s', blake.hexdigest())\n \n blake = blake2b()\n blake.update(hashme_data)\n echo('BLAKE2b', blake.hexdigest())\n\n # CRC\n if c:\n for name in sorted(crcmod.predefined._crc_definitions_by_name):\n crc_name = crcmod.predefined._crc_definitions_by_name[name]['name']\n crc_func = crcmod.predefined.mkCrcFun(crc_name)\n echo(crc_name.upper(), hex(crc_func(hashme_data)))",
"def check(infile, decompress_alg, verify_payloads, quiet, output):\n\n decompress_alg = getattr(CompressionAlg, decompress_alg)\n\n failed_digests = []\n num_total = 0\n num_no_digest = 0\n try:\n desc_tpl = 'Verifying digests ({} failed)'\n pbar = tqdm(verify_digests(infile, verify_payloads, decompress_alg),\n desc=desc_tpl.format(0), unit=' record(s)', leave=False, disable=quiet, mininterval=0.2)\n\n for v in pbar:\n num_total += 1\n\n status = 'OK'\n if v['block_digest_ok'] is False:\n status = 'FAIL'\n elif v['block_digest_ok'] is None:\n status = 'NO_DIGEST'\n\n if 'payload_digest_ok' in v:\n if v['payload_digest_ok'] is True:\n status += ', PAYLOAD_OK'\n elif v['payload_digest_ok'] is False:\n status += ', PAYLOAD_FAIL'\n elif v['payload_digest_ok'] is None:\n status += ', PAYLOAD_NO_DIGEST'\n\n if 'FAIL' in status:\n failed_digests.append(v['record_id'])\n pbar.set_description(desc_tpl.format(len(failed_digests)))\n elif 'NO_DIGEST' in status and 'OK' not in status:\n num_no_digest += 1\n\n if output:\n output.write(f'{v[\"record_id\"]}: {status}\\n')\n\n if output:\n output.close()\n\n except KeyboardInterrupt:\n if not quiet:\n click.echo('Verification aborted.')\n sys.exit(1)\n finally:\n if not quiet and num_total > 0:\n if not failed_digests:\n click.echo(f'{num_total - num_no_digest} records were verified successfully.')\n if num_no_digest:\n click.echo(f'{num_no_digest} records were skipped without digest.')\n else:\n click.echo('Failed records:')\n click.echo('===============')\n for rec in failed_digests:\n click.echo(rec)\n sys.exit(1)",
"def _is_hash_valid(self):\n downloaded_hash = sha1(self._downloaded_bytes).digest()\n return downloaded_hash == self.hash",
"def verify(self, h):\n CrawlConfig.log(\"hsi(%d) attempting to verify %s\" % (h.pid(),\n self.path))\n rsp = h.hashverify(self.path)\n\n if \"TIMEOUT\" in rsp or \"ERROR\" in rsp:\n rval = \"skipped\"\n self.set('fails', self.fails + 1)\n CrawlConfig.log(\"hashverify transfer incomplete on %s -- skipping\"\n % self.path)\n h.quit()\n elif \"%s: (md5) OK\" % self.path in rsp:\n rval = \"matched\"\n CrawlConfig.log(\"hashverify matched on %s\" % self.path)\n elif \"no valid checksum found\" in rsp:\n if self.addable(self.cos):\n rval = self.add_to_sample(h)\n else:\n self.set('checksum', 0)\n rval = \"skipped\"\n CrawlConfig.log(\"hashverify skipped %s\" % self.path)\n else:\n rval = Alert.Alert(\"Checksum mismatch: %s\" % rsp)\n CrawlConfig.log(\"hashverify generated 'Checksum mismatch' \" +\n \"alert on %s\" % self.path)\n return rval",
"def check_hash(self):\n m = rtorrent9.rpc.Multicall(self)\n self.multicall_add(m, \"d.check_hash\")\n\n return m.call()[-1]"
] |
[
"0.68194133",
"0.68004715",
"0.6736972",
"0.66865826",
"0.65869343",
"0.6368939",
"0.6293834",
"0.62324214",
"0.6138452",
"0.6136976",
"0.6094988",
"0.59902525",
"0.5987892",
"0.5987892",
"0.597661",
"0.59442955",
"0.5934656",
"0.59294504",
"0.5914992",
"0.5909823",
"0.58761203",
"0.5864959",
"0.5864453",
"0.5843463",
"0.58427364",
"0.5835581",
"0.5831494",
"0.5820658",
"0.58193034",
"0.5818882"
] |
0.7769532
|
0
|
A helper function that checks the expected compressed length of a file-like object. The length of the file must be strictly equal to the expected length. This is a deliberately redundant implementation designed to complement tuf.download._check_downloaded_length().
|
def __hard_check_compressed_file_length(self, file_object,
compressed_file_length):
observed_length = file_object.get_compressed_length()
if observed_length != compressed_file_length:
raise tuf.DownloadLengthMismatchError(compressed_file_length,
observed_length)
else:
logger.debug('file length ('+str(observed_length)+\
') == trusted length ('+str(compressed_file_length)+')')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __soft_check_compressed_file_length(self, file_object,\n compressed_file_length):\n\n observed_length = file_object.get_compressed_length()\n if observed_length > compressed_file_length:\n raise tuf.DownloadLengthMismatchError(compressed_file_length,\n observed_length)\n else:\n logger.debug('file length ('+str(observed_length)+\\\n ') <= trusted length ('+str(compressed_file_length)+')')",
"def _check_content_length(r: requests.Response):\n content_length = r.headers.get('Content-Length')\n if content_length is None:\n logger.debug('Cannot check length before downloading file')\n return\n\n if int(content_length) > MAX_DOWNLOAD_BYTES:\n raise FetchFileTooBigError(\n 'File length is {} bytes'.format(content_length)\n )",
"def check_file_size(self, file_name:str)->bool:\n try:\n size = os.path.getsize(file_name)\n except Exception as e:\n print(\"Failed to get file file size for %s - %s\" % (file_name, e))\n return -1\n\n if size >= self.file_size: \n return True\n return False",
"def secure_filesize(filepath):\n return os.path.getsize(filepath) <= MAX_FILESIZE",
"def check_pack_sizes():\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n for row in c.execute(\"SELECT lower(hex(sum)), size FROM packs\"):\n checksum, size = row\n resp = s3.head_object(Bucket=BUCKET, Key=f\"{checksum}.pack\")\n length = resp[\"ContentLength\"]\n if length != size:\n raise ValueError(f\"pack {checksum}: expected size {size} but actual size is {length}\")",
"def is_too_large(usr_file: str) -> bool:\n if usr_file.file_size >= MAX_FILESIZE_DOWNLOAD:\n return True\n else:\n return False",
"def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. File may be truncated')\n return True",
"def check_size(self):\n\n if not self.size:\n if not os.path.exists(self.get_path()): return False\n size = os.path.getsize(self.get_path())\n else: size = self.size\n Settings.maybe_print(\"file size: {}kb - {}mb\".format(size/1000, size/1000000))\n global ONE_MEGABYTE\n if size <= ONE_MEGABYTE:\n Settings.warn_print(\"small file size\")\n global ONE_HUNDRED_KILOBYTES\n if size <= ONE_HUNDRED_KILOBYTES:\n Settings.warn_print(\"tiny file size\")\n self.size = size\n if size == 0:\n Settings.err_print(\"empty file size\")\n return False\n return True",
"def is_too_long(file_name: str) -> bool:\n return len(file_name + REPORT_FILE_EXT) > 255",
"def test_lengths(self):\n self.assertEqual(size(attempt.Z), 201)\n self.assertEqual(size(attempt.W), 201)",
"def check_s3_object_size(bucket, key_name):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n size = s3_resource.Object(bucket, key_name).content_length\n tracer.put_metadata('object_size', size)\n except Exception as e:\n logger.error(f'Error: {str(e)}')\n size = 'NaN'\n tracer.put_metadata('object_size', size)\n\n return(size)",
"def test_DataPackageFileAttributesAreValid_different_size(\n tempdir: pathlib.Path,\n):\n df = dpack_pb2.DataPackageFile()\n df.relative_path = \"a\"\n df.checksum_hash = dpack_pb2.SHA256\n df.checksum = SHA256_EMPTY_FILE\n df.size_in_bytes = 10 # An empty file has size 0\n (tempdir / \"a\").touch()\n assert not dpack.DataPackageFileAttributesAreValid(tempdir, df)",
"def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n return (cache_file_metadata_header.key_size > 0 and\n cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and\n cache_file_metadata_header.format_version in (1, 2, 3) and\n cache_file_metadata_header.last_fetched_time > 0 and\n cache_file_metadata_header.fetch_count > 0)",
"def check_file(filename, expected_bytes):\n if not os.path.exists(filename):\n print(\"please make sure {0} exists in the current directory\".format(filename))\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n \"File {0} didn't have the expected size. Please ensure you have downloaded the assignment files correctly\".format(filename))\n return filename",
"def testCompressedSize(self):\n\n uncompressed_file = tempfile.NamedTemporaryFile(delete=False)\n for line in range(200):\n uncompressed_file.write(\n 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '\n 'Sed eleifend')\n uncompressed_file.close()\n compressed_path = uncompressed_file.name + '.compressed'\n compressor_path = os.path.join(DIR_SOURCE_ROOT, 'third_party',\n 'fuchsia-sdk', 'sdk', 'tools', 'x64',\n 'blobfs-compression')\n subprocess.call([compressor_path, uncompressed_file.name, compressed_path])\n self.assertEqual(binary_sizes.CompressedSize(uncompressed_file.name),\n os.path.getsize(compressed_path))\n os.remove(uncompressed_file.name)\n os.remove(compressed_path)",
"def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False",
"def _checkResponseByteCount(payload):\n POSITION_FOR_GIVEN_NUMBER = 0\n NUMBER_OF_BYTES_TO_SKIP = 1\n\n _checkString(payload, minlength=1, description='payload')\n\n givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])\n countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP\n\n if givenNumberOfDatabytes != countedNumberOfDatabytes:\n errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + \\\n ' The data payload is: {3!r}'\n errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)\n raise ValueError(errortext)",
"def check_file(filename, force, expected_file_size=1):\n if os.path.exists(filename):\n if force or os.path.getsize(filename) < expected_file_size:\n logger.debug(\" .. Removing old file '%s'.\", filename)\n os.remove(filename)\n return False\n else:\n return True\n return False",
"def check_compression(ctype, clevel, olevel):\n repository = Repository(archiver.repository_path, exclusive=True)\n with repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n state = None\n while True:\n ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)\n if not ids:\n break\n for id in ids:\n chunk = repository.get(id, read_data=True)\n meta, data = manifest.repo_objs.parse(id, chunk) # will also decompress according to metadata\n m_olevel = meta.get(\"olevel\", -1)\n m_psize = meta.get(\"psize\", -1)\n print(\n hexlify(id).decode(),\n meta[\"ctype\"],\n meta[\"clevel\"],\n meta[\"csize\"],\n meta[\"size\"],\n m_olevel,\n m_psize,\n )\n # this is not as easy as one thinks due to the DecidingCompressor choosing the smallest of\n # (desired compressed, lz4 compressed, not compressed).\n assert meta[\"ctype\"] in (ctype, LZ4.ID, CNONE.ID)\n assert meta[\"clevel\"] in (clevel, 255) # LZ4 and CNONE has level 255\n if olevel != -1: # we expect obfuscation\n assert \"psize\" in meta\n assert m_olevel == olevel\n else:\n assert \"psize\" not in meta\n assert \"olevel\" not in meta",
"def checkSize(self):\n if self.format.maxSize and self.size > self.format.maxSize:\n return 1\n elif (self.format.minSize and\n (not self.req_grow and\n self.size < self.format.minSize) or\n (self.req_grow and self.req_max_size and\n self.req_max_size < self.format.minSize)):\n return -1\n return 0",
"def is_file_size_error(self):\n return self._tag == 'file_size_error'",
"def _assert_file_count_equal(self, expected_count):\n assert len(BlockStructureModel._get_all_files(self.usage_key)) == expected_count",
"def validate_file(path: str, expected_num_bytes: int, expected_hexdigest: str):\n with open(path, 'rb') as f:\n data = f.read()\n num_bytes = len(data)\n if num_bytes != expected_num_bytes:\n raise ValueError(\n f'Expected file content number of bytes to be {expected_num_bytes} but found {num_bytes}.'\n )\n hexdigest = hashlib.sha256(data).hexdigest()\n if hexdigest != expected_hexdigest:\n raise ValueError(\n f'Expected file content hash to be {expected_hexdigest!r} but found {hexdigest!r}.'\n )",
"def validate_checksum(blob: bytes, offset: int, length: int):\n\n checksum = ord(blob[offset + length - 1:offset + length])\n data_sum = sum(\n struct.unpack('%dB' % (length - 1), blob[offset:offset + length - 1])\n )\n if 0xff & (data_sum + checksum) != 0:\n raise ValueError('The data do not match the checksum')",
"def checkThumbSize(isz, tsz, desired):\n\n # tolerate 2% error\n try:\n if abs(float(isz[0]) / isz[1] - float(tsz[0]) / tsz[1]) > 0.02:\n return 0 # aspect has changed, or isz rotated\n except:\n return 0\n return abs(desired - tsz[0]) <= 1 or abs(desired - tsz[1]) <= 1",
"def check_file_transferred(replica, location):\n\n from tardis.tardis_portal.models import Dataset_File\n datafile = Dataset_File.objects.get(pk=replica.datafile.id)\n\n # If the remote is capable, get it to send us the checksums and / or\n # file length for its copy of the file\n try:\n # Fetch the remote's metadata for the file\n m = location.provider.get_metadata(replica)\n _check_attribute(m, datafile.size, 'length')\n if (_check_attribute(m, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute(m, datafile.md5sum, 'md5sum')):\n return True\n if location.trust_length and \\\n _check_attribute(m, datafile.size, 'length') :\n return False\n raise MigrationError('Not enough metadata for verification')\n except NotImplementedError:\n pass\n except HTTPError as e:\n # Bad request means that the remote didn't recognize the query\n if e.code != 400:\n raise\n\n if location.provider.trust_length :\n try:\n length = location.provider.get_length(replica)\n if _check_attribute2(length, datafile.size, 'length'):\n return False\n except NotImplementedError:\n pass\n\n # Fetch back the remote file and verify it locally.\n f = location.provider.get_opener(replica)()\n md5sum, sha512sum, size, x = generate_file_checksums(f, None)\n _check_attribute2(str(size), datafile.size, 'length')\n if _check_attribute2(sha512sum, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute2(md5sum, datafile.md5sum, 'md5sum'):\n return True\n raise MigrationError('Not enough metadata for file verification')",
"def test_bytearray_respects_length(self):\n self._respects_length_test(bytearray)",
"def verify_size_content(self, re_size):\n to_alternate = 0\n if re_size['chunck'] < re_size['size']:\n to_alternate = re_size['chunck']\n re_size['chunck'] = re_size['size']\n re_size['size'] = to_alternate\n return re_size",
"def test_size_returns_length(dq_3):\n assert dq_3.size() == 3"
] |
[
"0.8020666",
"0.69070154",
"0.62786996",
"0.6147197",
"0.6046355",
"0.58408123",
"0.58013856",
"0.5790666",
"0.5770636",
"0.5769222",
"0.5685216",
"0.56598413",
"0.5642787",
"0.5641548",
"0.56404966",
"0.56370836",
"0.5596607",
"0.5533528",
"0.5476115",
"0.5465439",
"0.54634446",
"0.5461173",
"0.5456884",
"0.5454762",
"0.54485625",
"0.54385805",
"0.54310024",
"0.5419655",
"0.5418396",
"0.54029626"
] |
0.808241
|
0
|
A helper function that checks the expected compressed length of a file-like object. The length of the file must be less than or equal to the expected length. This is a deliberately redundant implementation designed to complement tuf.download._check_downloaded_length().
|
def __soft_check_compressed_file_length(self, file_object,
compressed_file_length):
observed_length = file_object.get_compressed_length()
if observed_length > compressed_file_length:
raise tuf.DownloadLengthMismatchError(compressed_file_length,
observed_length)
else:
logger.debug('file length ('+str(observed_length)+\
') <= trusted length ('+str(compressed_file_length)+')')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __hard_check_compressed_file_length(self, file_object,\n compressed_file_length):\n\n observed_length = file_object.get_compressed_length()\n if observed_length != compressed_file_length:\n raise tuf.DownloadLengthMismatchError(compressed_file_length,\n observed_length)\n else:\n logger.debug('file length ('+str(observed_length)+\\\n ') == trusted length ('+str(compressed_file_length)+')')",
"def _check_content_length(r: requests.Response):\n content_length = r.headers.get('Content-Length')\n if content_length is None:\n logger.debug('Cannot check length before downloading file')\n return\n\n if int(content_length) > MAX_DOWNLOAD_BYTES:\n raise FetchFileTooBigError(\n 'File length is {} bytes'.format(content_length)\n )",
"def check_file_size(self, file_name:str)->bool:\n try:\n size = os.path.getsize(file_name)\n except Exception as e:\n print(\"Failed to get file file size for %s - %s\" % (file_name, e))\n return -1\n\n if size >= self.file_size: \n return True\n return False",
"def secure_filesize(filepath):\n return os.path.getsize(filepath) <= MAX_FILESIZE",
"def is_too_large(usr_file: str) -> bool:\n if usr_file.file_size >= MAX_FILESIZE_DOWNLOAD:\n return True\n else:\n return False",
"def is_too_long(file_name: str) -> bool:\n return len(file_name + REPORT_FILE_EXT) > 255",
"def check_pack_sizes():\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n for row in c.execute(\"SELECT lower(hex(sum)), size FROM packs\"):\n checksum, size = row\n resp = s3.head_object(Bucket=BUCKET, Key=f\"{checksum}.pack\")\n length = resp[\"ContentLength\"]\n if length != size:\n raise ValueError(f\"pack {checksum}: expected size {size} but actual size is {length}\")",
"def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. File may be truncated')\n return True",
"def check_size(self):\n\n if not self.size:\n if not os.path.exists(self.get_path()): return False\n size = os.path.getsize(self.get_path())\n else: size = self.size\n Settings.maybe_print(\"file size: {}kb - {}mb\".format(size/1000, size/1000000))\n global ONE_MEGABYTE\n if size <= ONE_MEGABYTE:\n Settings.warn_print(\"small file size\")\n global ONE_HUNDRED_KILOBYTES\n if size <= ONE_HUNDRED_KILOBYTES:\n Settings.warn_print(\"tiny file size\")\n self.size = size\n if size == 0:\n Settings.err_print(\"empty file size\")\n return False\n return True",
"def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n return (cache_file_metadata_header.key_size > 0 and\n cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and\n cache_file_metadata_header.format_version in (1, 2, 3) and\n cache_file_metadata_header.last_fetched_time > 0 and\n cache_file_metadata_header.fetch_count > 0)",
"def check_file(filename, expected_bytes):\n if not os.path.exists(filename):\n print(\"please make sure {0} exists in the current directory\".format(filename))\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n \"File {0} didn't have the expected size. Please ensure you have downloaded the assignment files correctly\".format(filename))\n return filename",
"def is_file_size_error(self):\n return self._tag == 'file_size_error'",
"def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False",
"def testCompressedSize(self):\n\n uncompressed_file = tempfile.NamedTemporaryFile(delete=False)\n for line in range(200):\n uncompressed_file.write(\n 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '\n 'Sed eleifend')\n uncompressed_file.close()\n compressed_path = uncompressed_file.name + '.compressed'\n compressor_path = os.path.join(DIR_SOURCE_ROOT, 'third_party',\n 'fuchsia-sdk', 'sdk', 'tools', 'x64',\n 'blobfs-compression')\n subprocess.call([compressor_path, uncompressed_file.name, compressed_path])\n self.assertEqual(binary_sizes.CompressedSize(uncompressed_file.name),\n os.path.getsize(compressed_path))\n os.remove(uncompressed_file.name)\n os.remove(compressed_path)",
"def test_lengths(self):\n self.assertEqual(size(attempt.Z), 201)\n self.assertEqual(size(attempt.W), 201)",
"def checkSize(self):\n if self.format.maxSize and self.size > self.format.maxSize:\n return 1\n elif (self.format.minSize and\n (not self.req_grow and\n self.size < self.format.minSize) or\n (self.req_grow and self.req_max_size and\n self.req_max_size < self.format.minSize)):\n return -1\n return 0",
"def _checkResponseByteCount(payload):\n POSITION_FOR_GIVEN_NUMBER = 0\n NUMBER_OF_BYTES_TO_SKIP = 1\n\n _checkString(payload, minlength=1, description='payload')\n\n givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])\n countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP\n\n if givenNumberOfDatabytes != countedNumberOfDatabytes:\n errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + \\\n ' The data payload is: {3!r}'\n errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)\n raise ValueError(errortext)",
"def check_s3_object_size(bucket, key_name):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n size = s3_resource.Object(bucket, key_name).content_length\n tracer.put_metadata('object_size', size)\n except Exception as e:\n logger.error(f'Error: {str(e)}')\n size = 'NaN'\n tracer.put_metadata('object_size', size)\n\n return(size)",
"def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def _check_packed_file_size(self, file, dirs):\n if not self._is_size_bound(\n file\n ) or file.stat().st_size <= self._get_max_image_bytes(**dirs):\n return file\n raise RuntimeError(\"Output file ({}) too large\".format(file))",
"def maybe_compress(filename, compress_minsize=config.COMPRESS_MINSIZE):\n size = os.path.getsize(filename)\n if size < compress_minsize:\n return open(filename, 'rb'), False\n\n compressed_size, compressed_fobj = compress_file(filename)\n if compressed_size >= size:\n # Compressed file was larger\n log.info(\"%s was larger when compressed; using uncompressed version\", filename)\n return open(filename, 'rb'), False\n\n return compressed_fobj, True",
"def test_DataPackageFileAttributesAreValid_different_size(\n tempdir: pathlib.Path,\n):\n df = dpack_pb2.DataPackageFile()\n df.relative_path = \"a\"\n df.checksum_hash = dpack_pb2.SHA256\n df.checksum = SHA256_EMPTY_FILE\n df.size_in_bytes = 10 # An empty file has size 0\n (tempdir / \"a\").touch()\n assert not dpack.DataPackageFileAttributesAreValid(tempdir, df)",
"def checkThumbSize(isz, tsz, desired):\n\n # tolerate 2% error\n try:\n if abs(float(isz[0]) / isz[1] - float(tsz[0]) / tsz[1]) > 0.02:\n return 0 # aspect has changed, or isz rotated\n except:\n return 0\n return abs(desired - tsz[0]) <= 1 or abs(desired - tsz[1]) <= 1",
"def check_compression(ctype, clevel, olevel):\n repository = Repository(archiver.repository_path, exclusive=True)\n with repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n state = None\n while True:\n ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)\n if not ids:\n break\n for id in ids:\n chunk = repository.get(id, read_data=True)\n meta, data = manifest.repo_objs.parse(id, chunk) # will also decompress according to metadata\n m_olevel = meta.get(\"olevel\", -1)\n m_psize = meta.get(\"psize\", -1)\n print(\n hexlify(id).decode(),\n meta[\"ctype\"],\n meta[\"clevel\"],\n meta[\"csize\"],\n meta[\"size\"],\n m_olevel,\n m_psize,\n )\n # this is not as easy as one thinks due to the DecidingCompressor choosing the smallest of\n # (desired compressed, lz4 compressed, not compressed).\n assert meta[\"ctype\"] in (ctype, LZ4.ID, CNONE.ID)\n assert meta[\"clevel\"] in (clevel, 255) # LZ4 and CNONE has level 255\n if olevel != -1: # we expect obfuscation\n assert \"psize\" in meta\n assert m_olevel == olevel\n else:\n assert \"psize\" not in meta\n assert \"olevel\" not in meta",
"def test_bad_length(caplog):\n fname = get_test_data('nids/KOUN_SDUS84_DAATLX_201305202016', as_file_obj=False)\n with open(fname, 'rb') as inf:\n data = inf.read()\n fobj = BytesIO(data + data)\n\n with caplog.at_level(logging.WARNING, 'metpy.io.nexrad'):\n Level3File(fobj)\n assert len(caplog.records) == 1\n assert 'This product may not parse correctly' in caplog.records[0].message",
"def check_file_size(self, metatypes, request):\n try:\n helper_view = api.content.get_view(\n name='lfsp_helpers_view',\n context=api.portal.get(),\n request=request,\n )\n except InvalidParameterError:\n helper_view = None\n if not helper_view:\n return None\n maxsize = helper_view.get_maxsize_tiny(metatypes)\n if not maxsize:\n return None\n return helper_view.check_size(\n maxsize=maxsize,\n uploadfile=request['uploadfile'])",
"def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required",
"def check_pe_size(self, pe, data):\n length = max(map(lambda x: x.PointerToRawData + x.SizeOfRawData, pe.sections))\n if length < len(data):\n print(\"[+] %i extra bytes in the file\" % (len(data) - length))\n return True\n else:\n return False",
"def check_file(filename, force, expected_file_size=1):\n if os.path.exists(filename):\n if force or os.path.getsize(filename) < expected_file_size:\n logger.debug(\" .. Removing old file '%s'.\", filename)\n os.remove(filename)\n return False\n else:\n return True\n return False",
"def check_filename(basename):\n return len(basename) <= MAXIMUM_FILENAME_LENGTH"
] |
[
"0.80448043",
"0.7002057",
"0.6431012",
"0.62423784",
"0.6030527",
"0.59850883",
"0.5954853",
"0.58445287",
"0.5809482",
"0.5691129",
"0.56652397",
"0.5663412",
"0.56330484",
"0.5626236",
"0.55908865",
"0.5584143",
"0.5577502",
"0.5576237",
"0.5566866",
"0.55355495",
"0.5506491",
"0.5501121",
"0.54371995",
"0.5436665",
"0.5432197",
"0.5424294",
"0.54221624",
"0.5420944",
"0.5412734",
"0.541081"
] |
0.8079612
|
0
|
A private helper function to verify an uncompressed downloaded metadata file.
|
def __verify_uncompressed_metadata_file(self, metadata_file_object,
metadata_role):
metadata = metadata_file_object.read()
try:
metadata_signable = tuf.util.load_json_string(metadata)
except Exception, exception:
raise tuf.InvalidMetadataJSONError(exception)
else:
# Ensure the loaded 'metadata_signable' is properly formatted.
tuf.formats.check_signable_object_format(metadata_signable)
# Is 'metadata_signable' newer than the currently installed
# version?
current_metadata_role = self.metadata['current'].get(metadata_role)
# Compare metadata version numbers. Ensure there is a current
# version of the metadata role to be updated.
if current_metadata_role is not None:
current_version = current_metadata_role['version']
downloaded_version = metadata_signable['signed']['version']
if downloaded_version < current_version:
raise tuf.ReplayedMetadataError(metadata_role, downloaded_version,
current_version)
# Reject the metadata if any specified targets are not allowed.
if metadata_signable['signed']['_type'] == 'Targets':
self._ensure_all_targets_allowed(metadata_role,
metadata_signable['signed'])
# Verify the signature on the downloaded metadata object.
valid = tuf.sig.verify(metadata_signable, metadata_role)
if not valid:
raise tuf.BadSignatureError(metadata_role)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n return (cache_file_metadata_header.key_size > 0 and\n cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and\n cache_file_metadata_header.format_version in (1, 2, 3) and\n cache_file_metadata_header.last_fetched_time > 0 and\n cache_file_metadata_header.fetch_count > 0)",
"def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)",
"def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)",
"def test_verify_unzip(self):\n assert os.path.exists(\n os.path.join(\n settings.MEDIA_ROOT,\n \"indices\",\n \"test-index\",\n \"data\",\n \"sample.txt\"\n )\n )",
"def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False",
"def CheckSupportedFormat(cls, path):\n try:\n zip_file = zipfile.ZipFile(\n path, mode='r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)\n\n with zip_file.open('metadata.txt', mode='r') as file_object:\n stream_data = file_object.read()\n\n storage_metadata_reader = _StorageMetadataReader()\n storage_metadata = storage_metadata_reader.Read(stream_data)\n\n cls._CheckStorageMetadata(storage_metadata)\n\n zip_file.close()\n result = True\n\n except (IOError, KeyError, zipfile.BadZipfile):\n result = False\n\n return result",
"def valid_tpkg_file(self, path):\n\n\t\tprint(self.config[\"daemon\"][\"rootdir\"] + path)\n\t\tif os.path.exists(self.config[\"daemon\"][\"rootdir\"] + \"/\" + path):\n\t\t\treturn self.fetch_remote_hashcode(path) == self.fetch_local_hashcode(path)\n\t\telse:\n\t\t\tprint(\"Package: \" + path + \" has not been downloaded.\");\n\t\treturn False",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def _verify(self) -> None:\n # Check if the files already exist\n if os.path.exists(os.path.join(self.root, self.image_root)):\n return\n\n # Check if .zip files already exists (if so extract)\n exists = []\n for filename, md5 in zip(self.filenames, self.md5s):\n filepath = os.path.join(self.root, filename)\n if os.path.isfile(filepath):\n if self.checksum and not check_integrity(filepath, md5):\n raise RuntimeError(\"Dataset found, but corrupted.\")\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n raise RuntimeError(\n \"Dataset not found in `root` directory, either specify a different\"\n + \" `root` directory or manually download the dataset to this directory.\"\n )",
"def validate_tariff(self):\n\t\treturn True",
"def verify_zipfile(filename, trust_dir=None):\n\n try:\n zip_obj = zipfile.ZipFile(filename)\n except (IOError, zipfile.BadZipfile):\n return False\n\n # Get ZIP hashes\n hashes = get_zip_hashes(zip_obj)\n\n # Read signed hash data from ZIP comment\n signature_data = zip_obj.comment\n\n # Close ZIP file\n zip_obj.close()\n\n # Verify signed data\n return verify_data(hashes, signature_data, trust_dir)",
"def _is_downloaded(self):\n return self._system.file_exists(self._tar_name)",
"def download_matt_mahoney_text8(filename, expected_bytes):\n if not os.path.exists(filename):\n print('Downloading ...')\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def _metadata_is_consistent(metadata):\n checks = []\n required = ('version', 'fields', 'size', 'width', 'height', 'points',\n 'viewpoint', 'data')\n for f in required:\n if f not in metadata:\n print('%s required' % f)\n checks.append((lambda m: all([k in m for k in required]),\n 'missing field'))\n checks.append((lambda m: len(m['type']) == len(m['count']) ==\n len(m['fields']),\n 'length of type, count and fields must be equal'))\n checks.append((lambda m: m['height'] > 0,\n 'height must be greater than 0'))\n checks.append((lambda m: m['width'] > 0,\n 'width must be greater than 0'))\n checks.append((lambda m: m['points'] > 0,\n 'points must be greater than 0'))\n checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',\n 'binary_compressed'),\n 'unknown data type:'\n 'should be ascii/binary/binary_compressed'))\n ok = True\n for check, msg in checks:\n if not check(metadata):\n print('error:', msg)\n ok = False\n return ok",
"def verify_fields(msg: dict, data_path: str) -> bool:\n if \"metadata\" in msg and \"filename\" in msg:\n if os.path.isfile(data_path + msg[\"filename\"]):\n return True\n return False",
"def _dist_has_meta_data(dist: pkg_resources.Distribution) -> bool:\n return dist.has_metadata('direct_url.json')",
"def check_magic(self, target: str):\n\t\twith open(target, \"rb+\") as archive:\n\t\t\tmagic = archive.read(4)\n\t\t\tif magic == struct.pack(\"I\", self.magic):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False",
"def __check_metadata(s3client, key, bucket_name):\n response = s3client.head_object(Bucket=bucket_name, Key=key)\n if 'status' in response['Metadata']:\n return response['Metadata']['status'] == 'uploaded'\n return False",
"def _verify_metadata(self, subject_meta):\n # NOTE: admins can see subject metadata in the v1 API, but shouldn't\n # be able to download the actual subject data.\n if subject_meta['status'] == 'deleted' and subject_meta['deleted']:\n raise exception.NotFound()\n\n if not subject_meta['size']:\n # override subject size metadata with the actual cached\n # file size, see LP Bug #900959\n subject_meta['size'] = self.cache.get_subject_size(subject_meta['id'])",
"def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")",
"def _verify(self) -> None:\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.data_dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.data_dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()",
"def _ReadStorageMetadata(self):\n stream_name = 'metadata.txt'\n if not self._HasStream(stream_name):\n return False\n\n stream_data = self._ReadStream(stream_name)\n\n storage_metadata_reader = _StorageMetadataReader()\n storage_metadata = storage_metadata_reader.Read(stream_data)\n\n ZIPStorageFile._CheckStorageMetadata(storage_metadata)\n\n self.format_version = storage_metadata.format_version\n self.serialization_format = storage_metadata.serialization_format\n self.storage_type = storage_metadata.storage_type\n\n return True",
"def verify_file(file_path):\n print(\"Verifying dataset file {}\".format(file_path))\n\n with open(file_path, mode='rb') as f:\n data = pickle.load(f, encoding='latin1')\n print(\"Number of records: {}\".format(len(data)))\n\n for record in data:\n try:\n check_record(record)\n except AssertionError:\n print(\"File with error: {}\".format(record[\"image_name\"]))\n print(\"Content of record: {}\".format(record))\n raise",
"def test__decompress(filename):\n with open(filename, mode=\"rb\") as file_handle:\n name, content = Submit._decompress(filename, file_handle)\n assert name.endswith(\"EcoliCore.xml\")\n assert len(content.read()) >= 494226",
"def verifyFileInfo(file_dict, guid):\n\n ec = 0\n error = PilotErrors()\n\n # does the file info dictionary have the correct file info? (non-zero and non-empty string)\n if file_dict.has_key(guid):\n if file_dict[guid] != \"\" and file_dict[guid] != \"0\":\n tolog(\"Valid file for guid %s: %s\" % (guid, file_dict[guid]))\n else:\n ec = error.ERR_NOPFC\n else:\n ec = error.ERR_NOPFC\n\n return ec",
"def test_08_verify_tar03(self, mock_isfile, mock_call, mock_msg):\n mock_msg.level = 0\n mock_isfile.return_value = True\n mock_call.return_value = 1\n status = udocker.FileUtil(\"tarball.tar\").verify_tar()\n self.assertFalse(status)",
"def maybe_download(filename, expected_bytes):\n filepath = datapath + filename\n if not os.path.exists(filepath):\n # urlretrieve returns a tuple of saved filepath and info() of the downloaded file\n filepath, _ = urllib.request.urlretrieve(url+filename, filepath)\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filepath)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filepath + '. Can you get to it with a browser?')\n return filepath",
"def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True",
"def testMetadata(self):\n self.assertGreater(len(self.unauth.metadata(self.dataset)), 0)\n self.assertGreater(len(self.auth.metadata(self.dataset)), 0)",
"def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file):\n base_url = 'http://www.example.com/somepackage.tgz'\n contents = b'downloaded'\n download_hash = hashlib.new('sha1', contents)\n link = Link(base_url + '#sha1=' + download_hash.hexdigest())\n\n session = Mock()\n session.get = Mock()\n response = session.get.return_value = MockResponse(contents)\n response.headers = {'content-type': 'application/x-tar'}\n response.url = base_url\n\n download_dir = mkdtemp()\n try:\n downloaded_file = os.path.join(download_dir, 'somepackage.tgz')\n create_file(downloaded_file, 'some contents')\n\n unpack_http_url(\n link,\n 'location',\n download_dir=download_dir,\n session=session,\n hashes=Hashes({'sha1': [download_hash.hexdigest()]})\n )\n\n # despite existence of downloaded file with bad hash, downloaded again\n session.get.assert_called_once_with(\n 'http://www.example.com/somepackage.tgz',\n headers={\"Accept-Encoding\": \"identity\"},\n stream=True,\n )\n # cached file is replaced with newly downloaded file\n with open(downloaded_file) as fh:\n assert fh.read() == 'downloaded'\n\n finally:\n rmtree(download_dir)"
] |
[
"0.65709776",
"0.63994855",
"0.6376387",
"0.63114023",
"0.6262165",
"0.6249125",
"0.623439",
"0.6172689",
"0.61573446",
"0.61059684",
"0.6089557",
"0.6085965",
"0.60393137",
"0.6024993",
"0.60169613",
"0.60069674",
"0.5968942",
"0.5944426",
"0.5941978",
"0.59248453",
"0.5912177",
"0.5908947",
"0.5891585",
"0.5866348",
"0.58660525",
"0.58656657",
"0.58532465",
"0.58407027",
"0.5832692",
"0.5830439"
] |
0.7640599
|
0
|
Unsafely download a metadata file up to a certain length. The actual file length may not be strictly equal to its expected length. File hashes will not be checked because they are expected to be unknown.
|
def unsafely_get_metadata_file(self, metadata_role, metadata_filepath,
compressed_file_length):
def unsafely_verify_uncompressed_metadata_file(metadata_file_object):
self.__soft_check_compressed_file_length(metadata_file_object,
compressed_file_length)
self.__verify_uncompressed_metadata_file(metadata_file_object,
metadata_role)
return self.__get_file(metadata_filepath,
unsafely_verify_uncompressed_metadata_file, 'meta',
compressed_file_length, download_safely=False,
compression=None)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _check_content_length(r: requests.Response):\n content_length = r.headers.get('Content-Length')\n if content_length is None:\n logger.debug('Cannot check length before downloading file')\n return\n\n if int(content_length) > MAX_DOWNLOAD_BYTES:\n raise FetchFileTooBigError(\n 'File length is {} bytes'.format(content_length)\n )",
"def __hard_check_compressed_file_length(self, file_object,\n compressed_file_length):\n\n observed_length = file_object.get_compressed_length()\n if observed_length != compressed_file_length:\n raise tuf.DownloadLengthMismatchError(compressed_file_length,\n observed_length)\n else:\n logger.debug('file length ('+str(observed_length)+\\\n ') == trusted length ('+str(compressed_file_length)+')')",
"def maybe_download(filename, expected_bytes):\n filepath = datapath + filename\n if not os.path.exists(filepath):\n # urlretrieve returns a tuple of saved filepath and info() of the downloaded file\n filepath, _ = urllib.request.urlretrieve(url+filename, filepath)\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filepath)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filepath + '. Can you get to it with a browser?')\n return filepath",
"def download_matt_mahoney_text8(filename, expected_bytes):\n if not os.path.exists(filename):\n print('Downloading ...')\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)",
"def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename",
"def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print ('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename",
"def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file):\n base_url = 'http://www.example.com/somepackage.tgz'\n contents = b'downloaded'\n download_hash = hashlib.new('sha1', contents)\n link = Link(base_url + '#sha1=' + download_hash.hexdigest())\n\n session = Mock()\n session.get = Mock()\n response = session.get.return_value = MockResponse(contents)\n response.headers = {'content-type': 'application/x-tar'}\n response.url = base_url\n\n download_dir = mkdtemp()\n try:\n downloaded_file = os.path.join(download_dir, 'somepackage.tgz')\n create_file(downloaded_file, 'some contents')\n\n unpack_http_url(\n link,\n 'location',\n download_dir=download_dir,\n session=session,\n hashes=Hashes({'sha1': [download_hash.hexdigest()]})\n )\n\n # despite existence of downloaded file with bad hash, downloaded again\n session.get.assert_called_once_with(\n 'http://www.example.com/somepackage.tgz',\n headers={\"Accept-Encoding\": \"identity\"},\n stream=True,\n )\n # cached file is replaced with newly downloaded file\n with open(downloaded_file) as fh:\n assert fh.read() == 'downloaded'\n\n finally:\n rmtree(download_dir)",
"def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def testGetFileTimeout(self):\n try:\n # Test to make sure get request times out\n fU = FileUtil(timeout=0.00001)\n remoteLocator = self.__largeHttpsFileUrl\n fn = fU.getFileName(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = fU.get(remoteLocator, lPath)\n self.assertFalse(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def safely_get_metadata_file(self, metadata_role, metadata_filepath,\n compressed_file_length,\n uncompressed_file_hashes, compression):\n\n def safely_verify_uncompressed_metadata_file(metadata_file_object):\n self.__hard_check_compressed_file_length(metadata_file_object,\n compressed_file_length)\n self.__check_hashes(metadata_file_object, uncompressed_file_hashes)\n self.__verify_uncompressed_metadata_file(metadata_file_object,\n metadata_role)\n\n return self.__get_file(metadata_filepath,\n safely_verify_uncompressed_metadata_file, 'meta',\n compressed_file_length, download_safely=True,\n compression=compression)",
"def __soft_check_compressed_file_length(self, file_object,\n compressed_file_length):\n\n observed_length = file_object.get_compressed_length()\n if observed_length > compressed_file_length:\n raise tuf.DownloadLengthMismatchError(compressed_file_length,\n observed_length)\n else:\n logger.debug('file length ('+str(observed_length)+\\\n ') <= trusted length ('+str(compressed_file_length)+')')",
"def maybe_download(filename, expected_bytes, force=False):\n if not P.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n filepath = P.join(DATA_DIR, filename)\n if force or not P.exists(filepath):\n print(\"Downloading %s, %s bytes...\" % (filename, sizeof_fmt(expected_bytes)))\n url = 'http://commondatastorage.googleapis.com/books1000/'\n urlretrieve(url + filename, filepath)\n\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')\n\n return filename",
"def _verify_metadata(self, subject_meta):\n # NOTE: admins can see subject metadata in the v1 API, but shouldn't\n # be able to download the actual subject data.\n if subject_meta['status'] == 'deleted' and subject_meta['deleted']:\n raise exception.NotFound()\n\n if not subject_meta['size']:\n # override subject size metadata with the actual cached\n # file size, see LP Bug #900959\n subject_meta['size'] = self.cache.get_subject_size(subject_meta['id'])",
"def reject_factory(total_length, content_type, filename, file_length):\n raise status.RequestEntityTooLarge('not accepting posted files')",
"def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, filename,\n reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(filename)\n\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify ' + filename + \\\n '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes, force=False):\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename",
"def t_getfile(self, link, filename, session):\n\n self.sema.acquire()\n\n filepath = os.path.join(os.getcwd() + '/Downloads/' + str(filename))\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n if not os.path.isfile(filepath):\n self.download_new_file(link, filepath, session)\n else:\n\n current_bytes = os.stat(filepath).st_size\n\n headers = requests.head(link).headers\n\n print(headers)\n if 'content-length' not in headers:\n print(f\"server doesn't support content-length for {link}\")\n self.sema.release()\n return\n\n total_bytes = int(requests.head(link).headers['content-length'])\n\n print(total_bytes)\n\n if current_bytes < total_bytes:\n #\n self.continue_file_download(link, filepath, session, current_bytes, total_bytes)\n print(f\"Current byte < total - remaining {total_bytes - current_bytes}\")\n else:\n print(f\"already done: {filename}\")\n\n self.sema.release()",
"def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n DOWNLOADED_FILEPATHS.append(metadata_path)\n\n return True",
"def download_and_validate_checksum(name, checksum):\n dst = os.path.join(DOWNLOADS_DIR, os.path.basename(name))\n download_file(src=name, dst=dst)\n md5 = hashlib.md5()\n for chunk in chunked_reader(dst):\n md5.update(chunk)\n dl_checksum = md5.digest().hex()\n if dl_checksum != checksum:\n raise ValueError(f\"expected checksum {checksum} but received {dl_checksum}\")\n os.remove(dst)",
"def test_unpack_file_url_download_bad_hash(self, tmpdir, data,\n monkeypatch):\n self.prep(tmpdir, data)\n\n # add in previous download (copy simple-2.0 as simple-1.0 so it's wrong\n # hash)\n dest_file = os.path.join(self.download_dir, self.dist_file)\n copy(self.dist_path2, dest_file)\n\n with open(self.dist_path, 'rb') as f:\n dist_path_md5 = hashlib.md5(f.read()).hexdigest()\n with open(dest_file, 'rb') as f:\n dist_path2_md5 = hashlib.md5(f.read()).hexdigest()\n\n assert dist_path_md5 != dist_path2_md5\n\n url = '{}#md5={}'.format(self.dist_url.url, dist_path_md5)\n dist_url = Link(url)\n unpack_file_url(dist_url, self.build_dir,\n download_dir=self.download_dir,\n hashes=Hashes({'md5': [dist_path_md5]}))\n\n # confirm hash is for simple1-1.0\n # the previous bad download has been removed\n with open(dest_file, 'rb') as f:\n assert hashlib.md5(f.read()).hexdigest() == dist_path_md5",
"def tailFile(filename, offset, length):\n\n overflow = False\n try:\n f = open(filename, 'rb')\n f.seek(0, 2)\n sz = f.tell()\n\n if sz > (offset + length):\n overflow = True\n offset = sz - 1\n\n if (offset + length) > sz:\n if offset > (sz - 1):\n length = 0\n offset = sz - length\n\n if offset < 0:\n offset = 0\n if length < 0:\n length = 0\n\n if length == 0:\n data = ''\n else:\n f.seek(offset)\n data = f.read(length)\n\n offset = sz\n return [data, offset, overflow]\n\n except (OSError, IOError):\n return ['', offset, False]"
] |
[
"0.68834263",
"0.6019552",
"0.6017874",
"0.60117567",
"0.5979806",
"0.5979806",
"0.5956577",
"0.5953644",
"0.59176356",
"0.59112364",
"0.590496",
"0.5834261",
"0.5826231",
"0.58154356",
"0.57973844",
"0.57973844",
"0.57973844",
"0.5739276",
"0.57232654",
"0.57115835",
"0.5689397",
"0.5670298",
"0.5650559",
"0.5613731",
"0.5594849",
"0.55810034",
"0.5572172",
"0.55649006",
"0.5556563",
"0.554649"
] |
0.6091753
|
1
|
Download, verify, and 'install' the metadata belonging to 'metadata_role'. Calling this function implies the metadata has been updated by the repository and thus needs to be redownloaded. The current and previous metadata stores are updated if the newly downloaded metadata is successfully verified.
|
def _update_metadata(self, metadata_role, fileinfo, compression=None):
# Construct the metadata filename as expected by the download/mirror modules.
metadata_filename = metadata_role + '.txt'
uncompressed_metadata_filename = metadata_filename
# The 'release' or Targets metadata may be compressed. Add the appropriate
# extension to 'metadata_filename'.
if compression == 'gzip':
metadata_filename = metadata_filename + '.gz'
# Extract file length and file hashes. They will be passed as arguments
# to 'download_file' function.
compressed_file_length = fileinfo['length']
uncompressed_file_hashes = fileinfo['hashes']
# Attempt a file download from each mirror until the file is downloaded and
# verified. If the signature of the downloaded file is valid, proceed,
# otherwise log a warning and try the next mirror. 'metadata_file_object'
# is the file-like object returned by 'download.py'. 'metadata_signable'
# is the object extracted from 'metadata_file_object'. Metadata saved to
# files are regarded as 'signable' objects, conformant to
# 'tuf.formats.SIGNABLE_SCHEMA'.
#
# Some metadata (presently timestamp) will be downloaded "unsafely", in the
# sense that we can only estimate its true length and know nothing about
# its hashes. This is because not all metadata will have other metadata
# for it; otherwise we will have an infinite regress of metadata signing
# for each other. In this case, we will download the metadata up to the
# best length we can get for it, not check its hashes, but perform the rest
# of the checks (e.g signature verification).
#
# Note also that we presently support decompression of only "safe"
    # metadata, but this is easily extended to "unsafe" metadata as well as
# "safe" targets.
if metadata_role == 'timestamp':
metadata_file_object = \
self.unsafely_get_metadata_file(metadata_role, metadata_filename,
compressed_file_length)
else:
metadata_file_object = \
self.safely_get_metadata_file(metadata_role, metadata_filename,
compressed_file_length,
uncompressed_file_hashes,
compression=compression)
# The metadata has been verified. Move the metadata file into place.
# First, move the 'current' metadata file to the 'previous' directory
# if it exists.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
current_filepath = os.path.abspath(current_filepath)
tuf.util.ensure_parent_dir(current_filepath)
previous_filepath = os.path.join(self.metadata_directory['previous'],
metadata_filename)
previous_filepath = os.path.abspath(previous_filepath)
if os.path.exists(current_filepath):
# Previous metadata might not exist, say when delegations are added.
tuf.util.ensure_parent_dir(previous_filepath)
shutil.move(current_filepath, previous_filepath)
# Next, move the verified updated metadata file to the 'current' directory.
# Note that the 'move' method comes from tuf.util's TempFile class.
# 'metadata_file_object' is an instance of tuf.util.TempFile.
metadata_signable = tuf.util.load_json_string(metadata_file_object.read())
if compression == 'gzip':
current_uncompressed_filepath = \
os.path.join(self.metadata_directory['current'],
uncompressed_metadata_filename)
current_uncompressed_filepath = \
os.path.abspath(current_uncompressed_filepath)
metadata_file_object.move(current_uncompressed_filepath)
else:
metadata_file_object.move(current_filepath)
# Extract the metadata object so we can store it to the metadata store.
# 'current_metadata_object' set to 'None' if there is not an object
# stored for 'metadata_role'.
updated_metadata_object = metadata_signable['signed']
current_metadata_object = self.metadata['current'].get(metadata_role)
# Finally, update the metadata and fileinfo stores.
logger.debug('Updated '+repr(current_filepath)+'.')
self.metadata['previous'][metadata_role] = current_metadata_object
self.metadata['current'][metadata_role] = updated_metadata_object
self._update_fileinfo(metadata_filename)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def refresh(self):\n\n # The timestamp role does not have signed metadata about it; otherwise we\n # would need an infinite regress of metadata. Therefore, we use some\n # default, sane metadata about it.\n DEFAULT_TIMESTAMP_FILEINFO = {\n 'hashes':None,\n 'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH\n }\n\n # Update the top-level metadata. The _update_metadata_if_changed() and\n # _update_metadata() calls below do NOT perform an update if there\n # is insufficient trusted signatures for the specified metadata.\n # Raise 'tuf.NoWorkingMirrorError' if an update fails.\n\n # Use default but sane information for timestamp metadata, and do not\n # require strict checks on its required length.\n self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)\n\n self._update_metadata_if_changed('release', referenced_metadata='timestamp')\n\n self._update_metadata_if_changed('root')\n\n self._update_metadata_if_changed('targets')\n\n # Updated the top-level metadata (which all had valid signatures), however,\n # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata\n # has expired.\n for metadata_role in ['timestamp', 'root', 'release', 'targets']:\n self._ensure_not_expired(metadata_role)",
"def update_metadata(self, metadata):\n return self.manager.update_metadata(self, metadata)",
"def _load_metadata_from_file(self, metadata_set, metadata_role):\n\n # Ensure we have a valid metadata set.\n if metadata_set not in ['current', 'previous']:\n raise tuf.Error('Invalid metadata set: '+repr(metadata_set))\n\n # Save and construct the full metadata path.\n metadata_directory = self.metadata_directory[metadata_set]\n metadata_filename = metadata_role + '.txt'\n metadata_filepath = os.path.join(metadata_directory, metadata_filename)\n \n # Ensure the metadata path is valid/exists, else ignore the call. \n if os.path.exists(metadata_filepath):\n # Load the file. The loaded object should conform to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n metadata_signable = tuf.util.load_json_file(metadata_filepath)\n\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Extract the 'signed' role object from 'metadata_signable'.\n metadata_object = metadata_signable['signed']\n \n # Save the metadata object to the metadata store.\n self.metadata[metadata_set][metadata_role] = metadata_object\n \n # We need to rebuild the key and role databases if \n # metadata object is 'root' or target metadata.\n if metadata_set == 'current':\n if metadata_role == 'root':\n self._rebuild_key_and_role_db()\n elif metadata_object['_type'] == 'Targets':\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def test_update_metadata(self):\n pass",
"def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license",
"def update_metadata(self, metadata):\n return self.parent.update_metadata_for_node(self, metadata)",
"def test_0020_verify_installed_repository_metadata(self):\n self.verify_installed_repository_metadata_unchanged(emboss_repository_name, common.test_user_1_name)",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def update_all_metadata(self, metadata):\n return self.manager.update_all_metadata(self, metadata)",
"def __init__(self, updater_name, repository_mirrors):\n \n # Do the arguments have the correct format?\n # These checks ensure the arguments have the appropriate\n # number of objects and object types and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if there is a mistmatch.\n tuf.formats.NAME_SCHEMA.check_match(updater_name)\n tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)\n \n # Save the validated arguments.\n self.name = updater_name\n self.mirrors = repository_mirrors\n\n # Store the trusted metadata read from disk.\n self.metadata = {}\n \n # Store the currently trusted/verified metadata.\n self.metadata['current'] = {} \n \n # Store the previously trusted/verified metadata.\n self.metadata['previous'] = {}\n\n # Store the file information of all the metadata files. The dict keys are\n # paths, the dict values fileinfo data. This information can help determine\n # whether a metadata file has changed and so needs to be re-downloaded.\n self.fileinfo = {}\n \n # Store the location of the client's metadata directory.\n self.metadata_directory = {}\n \n # Ensure the repository metadata directory has been set.\n if tuf.conf.repository_directory is None:\n message = 'The TUF update client module must specify the directory' \\\n ' containing the local repository files.' \\\n ' \"tuf.conf.repository_directory\" MUST be set.'\n raise tuf.RepositoryError(message)\n\n # Set the path for the current set of metadata files. \n repository_directory = tuf.conf.repository_directory\n current_path = os.path.join(repository_directory, 'metadata', 'current')\n \n # Ensure the current path is valid/exists before saving it.\n if not os.path.exists(current_path):\n message = 'Missing '+repr(current_path)+'. This path must exist and, ' \\\n 'at a minimum, contain the root metadata file.' \n raise tuf.RepositoryError(message)\n self.metadata_directory['current'] = current_path\n \n # Set the path for the previous set of metadata files. \n previous_path = os.path.join(repository_directory, 'metadata', 'previous') \n \n # Ensure the previous path is valid/exists.\n if not os.path.exists(previous_path):\n message = 'Missing '+repr(previous_path)+'. This path must exist.'\n raise tuf.RepositoryError(message)\n self.metadata_directory['previous'] = previous_path\n \n # Load current and previous metadata.\n for metadata_set in ['current', 'previous']:\n for metadata_role in ['root', 'targets', 'release', 'timestamp']:\n self._load_metadata_from_file(metadata_set, metadata_role)\n \n # Raise an exception if the repository is missing the required 'root'\n # metadata.\n if 'root' not in self.metadata['current']:\n message = 'No root of trust! Could not find the \"root.txt\" file.'\n raise tuf.RepositoryError(message)",
"def _ensure_not_expired(self, metadata_role):\n \n # Construct the full metadata filename and the location of its\n # current path. The current path of 'metadata_role' is needed\n # to log the exact filename of the expired metadata.\n metadata_filename = metadata_role + '.txt'\n rolepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n rolepath = os.path.abspath(rolepath)\n \n # Extract the expiration time.\n expires = self.metadata['current'][metadata_role]['expires']\n \n # If the current time has surpassed the expiration date, raise\n # an exception. 'expires' is in YYYY-MM-DD HH:MM:SS format, so\n # convert it to seconds since the epoch, which is the time format\n # returned by time.time() (i.e., current time), before comparing.\n current_time = time.time()\n expiry_time = tuf.formats.parse_time(expires)\n if expiry_time < current_time:\n logger.error('Metadata '+repr(rolepath)+' expired on '+repr(expires)+'.')\n raise tuf.ExpiredMetadataError(expires)",
"def set_metadata(self, metadata):\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name),\n body=metadata)",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def metadata_update(self, new_metadata=None):\n if new_metadata is None:\n self.metadata_set(self.t.metadata())",
"def __verify_uncompressed_metadata_file(self, metadata_file_object,\n metadata_role):\n\n metadata = metadata_file_object.read()\n try:\n metadata_signable = tuf.util.load_json_string(metadata)\n except Exception, exception:\n raise tuf.InvalidMetadataJSONError(exception)\n else:\n # Ensure the loaded 'metadata_signable' is properly formatted.\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Is 'metadata_signable' newer than the currently installed\n # version?\n current_metadata_role = self.metadata['current'].get(metadata_role)\n\n # Compare metadata version numbers. Ensure there is a current\n # version of the metadata role to be updated.\n if current_metadata_role is not None:\n current_version = current_metadata_role['version']\n downloaded_version = metadata_signable['signed']['version']\n if downloaded_version < current_version:\n raise tuf.ReplayedMetadataError(metadata_role, downloaded_version,\n current_version)\n\n # Reject the metadata if any specified targets are not allowed.\n if metadata_signable['signed']['_type'] == 'Targets':\n self._ensure_all_targets_allowed(metadata_role,\n metadata_signable['signed'])\n\n # Verify the signature on the downloaded metadata object.\n valid = tuf.sig.verify(metadata_signable, metadata_role)\n if not valid:\n raise tuf.BadSignatureError(metadata_role)",
"def _store_package_metadata(self):",
"def test_update_metadata1(self):\n pass",
"def _update_extra_metadata(self, extra_metadata):\n self._add_filename_metadata(extra_metadata)\n self._derive_extra_metadata(extra_metadata)\n \n if type(self) == SAFESentinel3:\n self._extract_metadata_from_zipfile(extra_metadata)",
"def check_dataset_old_metadata_location(**_):\n old_metadata = get_pre_0_3_4_datasets_metadata()\n\n if not old_metadata:\n return True, False, None\n\n problems = (\n WARNING + \"There are metadata files in the old location.\"\n '\\n (use \"renku migrate\" to move them)\\n\\n\\t'\n + \"\\n\\t\".join(click.style(str(path.relative_to(project_context.path)), fg=\"yellow\") for path in old_metadata)\n + \"\\n\"\n )\n\n return False, False, problems",
"def _rebuild_key_and_role_db(self):\n \n # Clobbering this means all delegated metadata files are rendered outdated\n # and will need to be reloaded. However, reloading the delegated metadata\n # files is avoided here because fetching target information with methods\n # like all_targets() and target() always cause a refresh of these files.\n # The metadata files for delegated roles are also not loaded when the\n # repository is first instantiated. Due to this setup, reloading delegated\n # roles is not required here.\n tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])\n tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])",
"def metadata_update(self, _):\n self.details.original_widget = YesNoWidget('Update metadata files?', self.__metadata_update)",
"def update_metadata(self, metadata: t.Mapping[str, str]) -> None:\n self._metadata.update(metadata)",
"def set_metadata(self, metadata):\n return self.manager.set_metadata(self, metadata)",
"def set_metadata(self, metadata):\n return self.manager.set_metadata(self, metadata)",
"def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })",
"def metadata_storage_config():\n\n\tprint_section_header(\"METADATA STORAGE\", Fore.BLUE)\n\n\t###\n\t# Language Modeling Data\n\t###\n\n\tif prompt_yes_no(top_line=\"-> Clear language modeling metadata?\",\n\t bottom_line=\"This includes user spelling, typing and suggestion data.\"):\n\t\tprint_confirmation(\"Removing language modeling data...\")\n\t\tsp.run('rm -rfv \"~/Library/LanguageModeling/*\" \"~/Library/Spelling/*\" \"~/Library/Suggestions/*\"', shell=True, stdout=sp.PIPE)\n\n\tif prompt_yes_no(bottom_line=\"-> Disable language modeling data collection?\"):\n\t\tprint_confirmation(\"Disabling language modeling data collection...\")\n\t\tsp.run('sudo chmod -R 000 ~/Library/LanguageModeling ~/Library/Spelling ~/Library/Suggestions', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo chflags -R uchg ~/Library/LanguageModeling ~/Library/Spelling ~/Library/Suggestions', shell=True, stdout=sp.PIPE)\n\n\t###\n\t# QuickLook and Quarantine Data\n\t###\n\n\tif prompt_yes_no(top_line=\"-> Clear QuickLook metadata?\",\n\t bottom_line=\"This will erase spotlight user data.\"):\n\t\tprint_confirmation(\"Removing QuickLook metadata...\")\n\t\tsp.run('rm -rfv \"~/Library/Application Support/Quick Look/*\"', shell=True, stdout=sp.PIPE)\n\n\t###\n\t# Downloads Metadata\n\t###\n\n\t# TODO: BUG --> /bin/sh: /Users/alichtman/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2: Operation not permitted\n\tif prompt_yes_no(bottom_line=\"-> Clear Downloads metadata?\"):\n\t\tprint_confirmation(\"Removing Downloads metadata...\")\n\t\tsp.run(':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2', shell=True, stdout=sp.PIPE)\n\n\tif prompt_yes_no(bottom_line=\"-> Disable metadata collection from Downloads?\"):\n\t\tprint_confirmation(\"Disabling Quarantine data collection from downloaded files...\")\n\t\tsp.run('sudo chflags schg ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2', shell=True, stdout=sp.PIPE)\n\n\t# TODO: ERRORS\n\t\t# chmod: ~/Library/Application Support/Quick Look: No such file or directory\n\t\t# chflags: ~/Library/Application Support/Quick Look: No such file or directory\n\n\t# if prompt_yes_no(bottom_line=\"Disable QuickLook data logging?\"):\n\t# \tprint_confirmation(\"Disabling QuickLook data logging...\")\n\t# \tsp.run('sudo chmod -R 000 \"~/Library/Application Support/Quick Look\"', shell=True, stdout=sp.PIPE)\n\t# \tsp.run('sudo chflags -R uchg \"~/Library/Application Support/Quick Look\"', shell=True, stdout=sp.PIPE)\n\n\t###\n\t# Siri Metadata\n\t###\n\n\tif prompt_yes_no(bottom_line=\"-> Clear SiriAnalytics database?\"):\n\t\tprint_confirmation(\"Clearing SiriAnalytics database...\")\n\t\tsp.run('rm -rfv ~/Library/Assistant/SiriAnalytics.db', shell=True, stdout=sp.PIPE)",
"def download_meta(self):\n for f in self._manager.remote.list_contents(\".yml\"):\n self._manager.remote.download(f)",
"def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n DOWNLOADED_FILEPATHS.append(metadata_path)\n\n return True"
] |
[
"0.74762315",
"0.5975187",
"0.5900152",
"0.5849737",
"0.580336",
"0.562456",
"0.5609304",
"0.56023556",
"0.5595829",
"0.55921024",
"0.55448467",
"0.55376464",
"0.5479364",
"0.542237",
"0.54155916",
"0.53572685",
"0.53532314",
"0.535209",
"0.53430897",
"0.5335187",
"0.5273859",
"0.5255343",
"0.524819",
"0.5228915",
"0.5179169",
"0.5179169",
"0.51754344",
"0.51205",
"0.5108692",
"0.5106064"
] |
0.7117053
|
1
|
Update the metadata for 'metadata_role' if it has changed. With the exception of the 'timestamp' role, all the top-level roles are updated by this function. The 'timestamp' role is always downloaded from a mirror without first checking if it has been updated; it is updated in refresh() by calling _update_metadata('timestamp'). This function is also called for delegated role metadata, which are referenced by 'release'. If the metadata needs to be updated but an update cannot be obtained, this function will delete the file (with the exception of the root metadata, which never gets removed without a replacement). Due to the way in which metadata files are updated, it is expected that 'referenced_metadata' is not out of date and is trusted. The refresh() method updates the top-level roles in 'timestamp > release > root > targets' order. For delegated metadata, the parent role is updated before the delegated role. Taking into account that 'referenced_metadata' is updated and verified before 'metadata_role', this function determines if 'metadata_role' has changed by checking the 'meta' field of the newly updated 'referenced_metadata'.
|
def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):
uncompressed_metadata_filename = metadata_role + '.txt'
# Ensure the referenced metadata has been loaded. The 'root' role may be
# updated without having 'release' available.
if referenced_metadata not in self.metadata['current']:
message = 'Cannot update '+repr(metadata_role)+' because ' \
+referenced_metadata+' is missing.'
raise tuf.RepositoryError(message)
# The referenced metadata has been loaded. Extract the new
# fileinfo for 'metadata_role' from it.
else:
message = repr(metadata_role)+' referenced in '+\
repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'
logger.debug(message)
# There might be a compressed version of 'release.txt' or Targets
# metadata available for download. Check the 'meta' field of
# 'referenced_metadata' to see if it is listed when 'metadata_role'
# is 'release'. The full rolename for delegated Targets metadata
# must begin with 'targets/'. The Release role lists all the Targets
# metadata available on the repository, including any that may be in
# compressed form.
compression = None
# Extract the fileinfo of the uncompressed version of 'metadata_role'.
uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \
['meta'] \
[uncompressed_metadata_filename]
# Check for availability of compressed versions of 'release.txt',
# 'targets.txt', and delegated Targets, which also start with 'targets'.
    # For 'targets.txt' and delegated metadata, 'referenced_metadata'
# should always be 'release'. 'release.txt' specifies all roles
# provided by a repository, including their file sizes and hashes.
if metadata_role == 'release' or metadata_role.startswith('targets'):
gzip_metadata_filename = uncompressed_metadata_filename + '.gz'
if gzip_metadata_filename in self.metadata['current'] \
[referenced_metadata]['meta']:
compression = 'gzip'
compressed_fileinfo = self.metadata['current'][referenced_metadata] \
['meta'][gzip_metadata_filename]
# NOTE: When we download the compressed file, we care about its
# compressed length. However, we check the hash of the uncompressed
# file; therefore we use the hashes of the uncompressed file.
fileinfo = {'length': compressed_fileinfo['length'],
'hashes': uncompressed_fileinfo['hashes']}
logger.debug('Compressed version of '+\
repr(uncompressed_metadata_filename)+' is available at '+\
repr(gzip_metadata_filename)+'.')
else:
logger.debug('Compressed version of '+\
repr(uncompressed_metadata_filename)+' not available.')
fileinfo = uncompressed_fileinfo
else:
fileinfo = uncompressed_fileinfo
# Simply return if the file has not changed, according to the metadata
# about the uncompressed file provided by the referenced metadata.
if not self._fileinfo_has_changed(uncompressed_metadata_filename,
uncompressed_fileinfo):
return
logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\
' has changed.')
try:
self._update_metadata(metadata_role, fileinfo=fileinfo,
compression=compression)
except:
# The current metadata we have is not current but we couldn't
# get new metadata. We shouldn't use the old metadata anymore.
# This will get rid of in-memory knowledge of the role and
# delegated roles, but will leave delegated metadata files as
# current files on disk.
# TODO: Should we get rid of the delegated metadata files?
# We shouldn't need to, but we need to check the trust
# implications of the current implementation.
self._delete_metadata(metadata_role)
logger.error('Metadata for '+str(metadata_role)+' could not be updated')
raise
else:
# We need to remove delegated roles because the delegated roles
# may not be trusted anymore.
if metadata_role == 'targets' or metadata_role.startswith('targets/'):
logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')
# TODO: Should we also remove the keys of the delegated roles?
tuf.roledb.remove_delegated_roles(metadata_role)
self._import_delegations(metadata_role)
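
The update decision above hinges on comparing the fileinfo recorded for the uncompressed metadata file against what the referenced metadata now lists. The following is a minimal, self-contained sketch of that comparison, not taken from the TUF code above; the dict layout mirrors the 'length'/'hashes' fields used here, and the name fileinfo_has_changed is a hypothetical stand-in for self._fileinfo_has_changed.

# Minimal sketch (assumption: a standalone helper mirroring self._fileinfo_has_changed).
def fileinfo_has_changed(current_fileinfo, new_fileinfo):
    # No locally recorded fileinfo means the role was never fetched: treat as changed.
    if current_fileinfo is None:
        return True
    # A different length or any differing hash triggers a re-download.
    if current_fileinfo['length'] != new_fileinfo['length']:
        return True
    return current_fileinfo['hashes'] != new_fileinfo['hashes']

current = {'length': 2048, 'hashes': {'sha256': 'ab12...'}}
referenced = {'length': 2112, 'hashes': {'sha256': 'cd34...'}}
print(fileinfo_has_changed(current, referenced))  # True -> _update_metadata() would run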
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def refresh(self):\n\n # The timestamp role does not have signed metadata about it; otherwise we\n # would need an infinite regress of metadata. Therefore, we use some\n # default, sane metadata about it.\n DEFAULT_TIMESTAMP_FILEINFO = {\n 'hashes':None,\n 'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH\n }\n\n # Update the top-level metadata. The _update_metadata_if_changed() and\n # _update_metadata() calls below do NOT perform an update if there\n # is insufficient trusted signatures for the specified metadata.\n # Raise 'tuf.NoWorkingMirrorError' if an update fails.\n\n # Use default but sane information for timestamp metadata, and do not\n # require strict checks on its required length.\n self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)\n\n self._update_metadata_if_changed('release', referenced_metadata='timestamp')\n\n self._update_metadata_if_changed('root')\n\n self._update_metadata_if_changed('targets')\n\n # Updated the top-level metadata (which all had valid signatures), however,\n # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata\n # has expired.\n for metadata_role in ['timestamp', 'root', 'release', 'targets']:\n self._ensure_not_expired(metadata_role)",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def update_metadata(self, metadata):\n return self.parent.update_metadata_for_node(self, metadata)",
"def _ensure_not_expired(self, metadata_role):\n \n # Construct the full metadata filename and the location of its\n # current path. The current path of 'metadata_role' is needed\n # to log the exact filename of the expired metadata.\n metadata_filename = metadata_role + '.txt'\n rolepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n rolepath = os.path.abspath(rolepath)\n \n # Extract the expiration time.\n expires = self.metadata['current'][metadata_role]['expires']\n \n # If the current time has surpassed the expiration date, raise\n # an exception. 'expires' is in YYYY-MM-DD HH:MM:SS format, so\n # convert it to seconds since the epoch, which is the time format\n # returned by time.time() (i.e., current time), before comparing.\n current_time = time.time()\n expiry_time = tuf.formats.parse_time(expires)\n if expiry_time < current_time:\n logger.error('Metadata '+repr(rolepath)+' expired on '+repr(expires)+'.')\n raise tuf.ExpiredMetadataError(expires)",
"def update_metadata(self, metadata):\n return self.manager.update_metadata(self, metadata)",
"def _load_metadata_from_file(self, metadata_set, metadata_role):\n\n # Ensure we have a valid metadata set.\n if metadata_set not in ['current', 'previous']:\n raise tuf.Error('Invalid metadata set: '+repr(metadata_set))\n\n # Save and construct the full metadata path.\n metadata_directory = self.metadata_directory[metadata_set]\n metadata_filename = metadata_role + '.txt'\n metadata_filepath = os.path.join(metadata_directory, metadata_filename)\n \n # Ensure the metadata path is valid/exists, else ignore the call. \n if os.path.exists(metadata_filepath):\n # Load the file. The loaded object should conform to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n metadata_signable = tuf.util.load_json_file(metadata_filepath)\n\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Extract the 'signed' role object from 'metadata_signable'.\n metadata_object = metadata_signable['signed']\n \n # Save the metadata object to the metadata store.\n self.metadata[metadata_set][metadata_role] = metadata_object\n \n # We need to rebuild the key and role databases if \n # metadata object is 'root' or target metadata.\n if metadata_set == 'current':\n if metadata_role == 'root':\n self._rebuild_key_and_role_db()\n elif metadata_object['_type'] == 'Targets':\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def update_metadata_for_node(self, node, metadata):\n return self.manager.update_metadata(self, metadata, node=node)",
"def metadata_update(self, new_metadata=None):\n if new_metadata is None:\n self.metadata_set(self.t.metadata())",
"def __verify_uncompressed_metadata_file(self, metadata_file_object,\n metadata_role):\n\n metadata = metadata_file_object.read()\n try:\n metadata_signable = tuf.util.load_json_string(metadata)\n except Exception, exception:\n raise tuf.InvalidMetadataJSONError(exception)\n else:\n # Ensure the loaded 'metadata_signable' is properly formatted.\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Is 'metadata_signable' newer than the currently installed\n # version?\n current_metadata_role = self.metadata['current'].get(metadata_role)\n\n # Compare metadata version numbers. Ensure there is a current\n # version of the metadata role to be updated.\n if current_metadata_role is not None:\n current_version = current_metadata_role['version']\n downloaded_version = metadata_signable['signed']['version']\n if downloaded_version < current_version:\n raise tuf.ReplayedMetadataError(metadata_role, downloaded_version,\n current_version)\n\n # Reject the metadata if any specified targets are not allowed.\n if metadata_signable['signed']['_type'] == 'Targets':\n self._ensure_all_targets_allowed(metadata_role,\n metadata_signable['signed'])\n\n # Verify the signature on the downloaded metadata object.\n valid = tuf.sig.verify(metadata_signable, metadata_role)\n if not valid:\n raise tuf.BadSignatureError(metadata_role)",
"def update_all_metadata(self, metadata):\n return self.manager.update_all_metadata(self, metadata)",
"def _recover_auth_meta(self, auth_id, auth_meta):\n remove_volumes = []\n\n for volume, volume_data in auth_meta['volumes'].items():\n if not volume_data['dirty']:\n continue\n\n (group_id, volume_id) = volume.split('/')\n group_id = group_id if group_id is not 'None' else None\n volume_path = VolumePath(group_id, volume_id)\n access_level = volume_data['access_level']\n\n with self._volume_lock(volume_path):\n vol_meta = self._volume_metadata_get(volume_path)\n\n # No VMeta update indicates that there was no auth update\n # in Ceph either. So it's safe to remove corresponding\n # partial update in AMeta.\n if not vol_meta or auth_id not in vol_meta['auths']:\n remove_volumes.append(volume)\n continue\n\n want_auth = {\n 'access_level': access_level,\n 'dirty': False,\n }\n # VMeta update looks clean. Ceph auth update must have been\n # clean.\n if vol_meta['auths'][auth_id] == want_auth:\n continue\n\n readonly = True if access_level is 'r' else False\n self._authorize_volume(volume_path, auth_id, readonly)\n\n # Recovered from partial auth updates for the auth ID's access\n # to a volume.\n auth_meta['volumes'][volume]['dirty'] = False\n self._auth_metadata_set(auth_id, auth_meta)\n\n for volume in remove_volumes:\n del auth_meta['volumes'][volume]\n\n if not auth_meta['volumes']:\n # Clean up auth meta file\n self.fs.unlink(self._auth_metadata_path(auth_id))\n return\n\n # Recovered from all partial auth updates for the auth ID.\n auth_meta['dirty'] = False\n self._auth_metadata_set(auth_id, auth_meta)",
"def update_role_files(**kwargs):\n\n # Finds out which tracking branch you are on\n # Generates a commit in OA and each of its roles\n # Generates a git show output\n # Asks before triggering git review\n\n # Example commit message\n # Update all SHAs for 15.1.8\n # This patch updates all the roles to the latest available stable\n # SHA's, copies the release notes from the updated roles into the\n # integrated repo, updates all the OpenStack Service SHA's, and\n # updates the appropriate python requirements pins.\n click.echo(\"Not implemented yet\")",
"def update(self, role, timeout=None):\n req = RoleUpdateRequest()\n\n if role is not None:\n req.role.CopyFrom(plumbing.convert_role_to_plumbing(role))\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Update(\n req,\n metadata=self.parent.get_metadata('Roles.Update', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleUpdateResponse()\n resp.meta = plumbing.convert_update_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.role = plumbing.convert_role_to_porcelain(plumbing_response.role)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp",
"def test_update_metadata(self):\n pass",
"def check_dataset_old_metadata_location(**_):\n old_metadata = get_pre_0_3_4_datasets_metadata()\n\n if not old_metadata:\n return True, False, None\n\n problems = (\n WARNING + \"There are metadata files in the old location.\"\n '\\n (use \"renku migrate\" to move them)\\n\\n\\t'\n + \"\\n\\t\".join(click.style(str(path.relative_to(project_context.path)), fg=\"yellow\") for path in old_metadata)\n + \"\\n\"\n )\n\n return False, False, problems",
"def unlinkold(self, timestamp):\n if not self.metadata or self.metadata[X_TIMESTAMP] >= timestamp:\n return\n\n assert self.data_file, \\\n \"Have metadata, %r, but no data_file\" % self.metadata\n\n def _unlinkold():\n if self._is_dir:\n # Marker, or object, directory.\n #\n # Delete from the filesystem only if it contains no objects.\n # If it does contain objects, then just remove the object\n # metadata tag which will make this directory a\n # fake-filesystem-only directory and will be deleted when the\n # container or parent directory is deleted.\n metadata = read_metadata(self.data_file)\n if dir_is_object(metadata):\n metadata[X_OBJECT_TYPE] = DIR_NON_OBJECT\n write_metadata(self.data_file, metadata)\n rmobjdir(self.data_file)\n else:\n # Delete file object\n do_unlink(self.data_file)\n\n # Garbage collection of non-object directories. Now that we\n # deleted the file, determine if the current directory and any\n # parent directory may be deleted.\n dirname = os.path.dirname(self.data_file)\n while dirname and dirname != self._container_path:\n # Try to remove any directories that are not objects.\n if not rmobjdir(dirname):\n # If a directory with objects has been found, we can stop\n # garabe collection\n break\n else:\n dirname = os.path.dirname(dirname)\n\n self.threadpool.run_in_thread(_unlinkold)\n\n self.metadata = {}\n self.data_file = None",
"def update_metadata(self, metadata: t.Mapping[str, str]) -> None:\n self._metadata.update(metadata)",
"def update_metadata_for_node(self, loadbalancer, node, metadata):\n return loadbalancer.update_metadata_for_node(node, metadata)",
"def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')",
"def test_update_metadata1(self):\n pass",
"def test_update_meta_file_meta_file_exists(self):\n # Expected results\n date_old = '2021-09-13'\n date_new = '2021-09-14'\n dates_expected = [date_old, date_new]\n\n meta_content = f\"{self.meta_date_col},{self.meta_timestamp_col}\\n{date_old},{datetime.today().strftime(self.meta_timestamp_format)}\"\n self.bucket.put_object(Body=meta_content, Key=self.meta_key)\n # method execution\n MetaFile.update_meta_file(date_new, self.trg_bucket_connector)\n # read meta file\n df_result = self.trg_bucket_connector.read_meta_file()\n dates_result = list(df_result[\n self.meta_date_col])\n self.assertEqual(dates_expected, dates_result)",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def changeRoleInfo(self, role, info):",
"def set_metadata(self, metadata):\n return self.parent.set_metadata_for_node(self, metadata)",
"def _move_current_to_previous(self, metadata_role):\n\n # Get the 'current' and 'previous' full file paths for 'metadata_role'\n metadata_filepath = metadata_role + '.txt'\n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filepath)\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filepath)\n\n # Remove the previous path if it exists.\n if os.path.exists(previous_filepath):\n os.remove(previous_filepath)\n\n # Move the current path to the previous path. \n if os.path.exists(current_filepath):\n tuf.util.ensure_parent_dir(previous_filepath)\n os.rename(current_filepath, previous_filepath)",
"def update_metadata(self, file_id, metadata):\n pass",
"def metadata_update(self, _):\n self.details.original_widget = YesNoWidget('Update metadata files?', self.__metadata_update)",
"def _rebuild_key_and_role_db(self):\n \n # Clobbering this means all delegated metadata files are rendered outdated\n # and will need to be reloaded. However, reloading the delegated metadata\n # files is avoided here because fetching target information with methods\n # like all_targets() and target() always cause a refresh of these files.\n # The metadata files for delegated roles are also not loaded when the\n # repository is first instantiated. Due to this setup, reloading delegated\n # roles is not required here.\n tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])\n tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])"
] |
[
"0.7246707",
"0.7186253",
"0.6441687",
"0.6422909",
"0.5998397",
"0.5584296",
"0.5539659",
"0.54305273",
"0.54012203",
"0.539539",
"0.5306128",
"0.52712166",
"0.50628555",
"0.5043398",
"0.5006379",
"0.49786666",
"0.4959843",
"0.48630646",
"0.48453847",
"0.48013136",
"0.47740373",
"0.4754555",
"0.47516772",
"0.47180772",
"0.4716126",
"0.4689491",
"0.4680875",
"0.4675735",
"0.46564493",
"0.46175885"
] |
0.79665345
|
0
|
Ensure the delegated targets of 'metadata_role' are allowed; this is determined by inspecting the 'delegations' field of the parent role of 'metadata_role'. If a target specified by 'metadata_object' is not found in the parent role's delegations field, raise an exception. Targets allowed are either explicitly listed under the 'paths' field, or implicitly exist under a subdirectory of a parent directory listed under 'paths'. A parent role may delegate trust to all files under a particular directory, including files in subdirectories, by simply listing the directory (e.g., 'packages/source/Django/', the equivalent of 'packages/source/Django/*'). Targets listed in hashed bins are also validated (i.e., their calculated path hash prefixes must be delegated by the parent role).
|
def _ensure_all_targets_allowed(self, metadata_role, metadata_object):
# Return if 'metadata_role' is 'targets'. 'targets' is not
# a delegated role.
if metadata_role == 'targets':
return
# The targets of delegated roles are stored in the parent's
# metadata file. Retrieve the parent role of 'metadata_role'
# to confirm 'metadata_role' contains valid targets.
parent_role = tuf.roledb.get_parent_rolename(metadata_role)
# Iterate over the targets of 'metadata_role' and confirm they are trusted,
# or their root parent directory exists in the role delegated paths of the
# parent role.
roles = self.metadata['current'][parent_role]['delegations']['roles']
role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)
# Ensure the delegated role exists prior to extracting trusted paths from
# the parent's 'paths', or trusted path hash prefixes from the parent's
# 'path_hash_prefixes'.
if role_index is not None:
role = roles[role_index]
allowed_child_paths = role.get('paths')
allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')
actual_child_targets = metadata_object['targets'].keys()
if allowed_child_path_hash_prefixes is not None:
consistent = self._paths_are_consistent_with_hash_prefixes
if not consistent(actual_child_targets,
allowed_child_path_hash_prefixes):
raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\
' specifies target which does not'+\
' have a path hash prefix matching'+\
' the prefix listed by the parent'+\
' role '+repr(parent_role)+'.')
elif allowed_child_paths is not None:
# Check that each delegated target is either explicitly listed or a parent
# directory is found under role['paths'], otherwise raise an exception.
# If the parent role explicitly lists target file paths in 'paths',
# this loop will run in O(n^2), the worst-case. The repository
# maintainer will likely delegate entire directories, and opt for
# explicit file paths if the targets in a directory are delegated to
# different roles/developers.
for child_target in actual_child_targets:
for allowed_child_path in allowed_child_paths:
prefix = os.path.commonprefix([child_target, allowed_child_path])
if prefix == allowed_child_path:
break
else:
raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\
' specifies target '+\
repr(child_target)+' which is not'+\
' an allowed path according to'+\
' the delegations set by '+\
repr(parent_role)+'.')
else:
# 'role' should have been validated when it was downloaded.
# The 'paths' or 'path_hash_prefixes' attributes should not be missing,
# so raise an error in case this clause is reached.
raise tuf.FormatError(repr(role)+' did not contain one of '+\
'the required fields ("paths" or '+\
'"path_hash_prefixes").')
# Raise an exception if the parent has not delegated to the specified
# 'metadata_role' child role.
else:
raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\
repr(metadata_role)+'.')
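
To make the path check concrete, here is a small sketch of the commonprefix test used above to decide whether a delegated target falls under one of the parent role's allowed 'paths'. It is illustrative only; target_is_allowed is a hypothetical free function, not part of the class.

# Minimal sketch (assumption: standalone helper reproducing the prefix test above).
import os

def target_is_allowed(child_target, allowed_child_paths):
    # A target is allowed if an allowed path is a prefix of it (or matches exactly).
    for allowed_child_path in allowed_child_paths:
        prefix = os.path.commonprefix([child_target, allowed_child_path])
        if prefix == allowed_child_path:
            return True
    return False

allowed = ['packages/source/Django/']
print(target_is_allowed('packages/source/Django/Django-1.4.tar.gz', allowed))  # True
print(target_is_allowed('packages/source/Flask/Flask-0.9.tar.gz', allowed))    # False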
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _import_delegations(self, parent_role):\n \n current_parent_metadata = self.metadata['current'][parent_role]\n \n if 'delegations' not in current_parent_metadata:\n return\n\n # This could be quite slow with a huge number of delegations.\n keys_info = current_parent_metadata['delegations'].get('keys', {})\n roles_info = current_parent_metadata['delegations'].get('roles', [])\n\n logger.debug('Adding roles delegated from '+repr(parent_role)+'.')\n \n # Iterate through the keys of the delegated roles of 'parent_role'\n # and load them.\n for keyid, keyinfo in keys_info.items():\n if keyinfo['keytype'] in ['rsa', 'ed25519']:\n key = tuf.keys.format_metadata_to_key(keyinfo)\n \n # We specify the keyid to ensure that it's the correct keyid\n # for the key.\n try:\n tuf.keydb.add_key(key, keyid)\n except tuf.KeyAlreadyExistsError:\n pass\n except (tuf.FormatError, tuf.Error), e:\n logger.exception('Failed to add keyid: '+repr(keyid)+'.')\n logger.error('Aborting role delegation for parent role '+parent_role+'.')\n raise\n else:\n logger.warn('Invalid key type for '+repr(keyid)+'.')\n continue\n\n # Add the roles to the role database.\n for roleinfo in roles_info:\n try:\n # NOTE: tuf.roledb.add_role will take care\n # of the case where rolename is None.\n rolename = roleinfo.get('name')\n logger.debug('Adding delegated role: '+str(rolename)+'.')\n tuf.roledb.add_role(rolename, roleinfo)\n except tuf.RoleAlreadyExistsError, e:\n logger.warn('Role already exists: '+rolename)\n except:\n logger.exception('Failed to add delegated role: '+rolename+'.')\n raise",
"def _visit_child_role(self, child_role, target_filepath):\n\n child_role_name = child_role['name']\n child_role_paths = child_role.get('paths')\n child_role_path_hash_prefixes = child_role.get('path_hash_prefixes')\n # A boolean indicator that tell us whether 'child_role' has been delegated\n # the target with the name 'target_filepath'.\n child_role_is_relevant = False\n\n if child_role_path_hash_prefixes is not None:\n target_filepath_hash = self._get_target_hash(target_filepath)\n for child_role_path_hash_prefix in child_role_path_hash_prefixes:\n if target_filepath_hash.startswith(child_role_path_hash_prefix):\n child_role_is_relevant = True\n\n elif child_role_paths is not None:\n for child_role_path in child_role_paths:\n # A child role path may be a filepath or directory. The child\n # role 'child_role_name' is added if 'target_filepath' is located\n # under 'child_role_path'. Explicit filepaths are also added.\n prefix = os.path.commonprefix([target_filepath, child_role_path])\n if prefix == child_role_path:\n child_role_is_relevant = True\n\n else:\n # 'role_name' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' fields should not be missing,\n # so we raise a format error here in case they are both missing.\n raise tuf.FormatError(repr(child_role_name)+' has neither ' \\\n '\"paths\" nor \"path_hash_prefixes\"!')\n\n if child_role_is_relevant:\n logger.debug('Child role '+repr(child_role_name)+' has target '+\n repr(target_filepath))\n return child_role_name\n else:\n logger.debug('Child role '+repr(child_role_name)+\n ' does not have target '+repr(target_filepath))\n return None",
"def can_manage_relationship_aliases(self):\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def test_validate_delegate_inherited(self):\n self.make_assignment(self.category, self.user_bob, self.role_delegate)\n # Limit should not be reached\n delegate_as = self.make_assignment(\n self.project, self.user_carol, self.role_delegate\n )\n self.assertIsInstance(delegate_as, RoleAssignment)",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def ensure_access(self, target_member : M, accessor : M, permission : str):\n if not permission: \n return True\n if accessor is None:\n raise errors.NotAllowed(\"Accessor not found\")\n if target_member != accessor:\n raise errors.NotAllowed(\"Access not allowed for permission '%s'\" % permission)\n return True",
"def targets_of_role(self, rolename='targets'):\n \n # Does 'rolename' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.RELPATH_SCHEMA.check_match(rolename)\n\n self._refresh_targets_metadata(rolename)\n \n return self._targets_of_role(rolename, skip_refresh=True)",
"def test_create_delegate_limit_inherit(self):\n # Set up category owner\n new_user = self.make_user('new_user')\n self.owner_as_cat.user = new_user\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_DELEGATE,\n 'user': str(self.assign_user.sodar_uuid),\n }\n # NOTE: Post as owner\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n role_as = RoleAssignment.objects.filter(\n project=self.project, role=self.role_delegate, user=self.assign_user\n ).first()\n self.assertIsNotNone(role_as)",
"def test_create_delegate_limit(self):\n # Create new user and grant delegate role\n new_user = self.make_user('new_user')\n self.make_assignment(self.project, new_user, self.role_delegate)\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_DELEGATE,\n 'user': str(self.assign_user.sodar_uuid),\n }\n # NOTE: Post as owner\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)",
"def validate_application_command_permission_overwrite_target(target):\n # GOTO\n while True:\n if isinstance(target, Role):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_ROLE\n target_id = target.id\n target_lookup_failed = False\n break\n \n if isinstance(target, ClientUserBase):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_USER\n target_id = target.id\n target_lookup_failed = False\n break\n \n if isinstance(target, Channel):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_CHANNEL\n target_id = target.id\n target_lookup_failed = False\n break\n \n if isinstance(target, tuple) and len(target) == 2:\n target_type_maybe, target_id_maybe = target\n \n if isinstance(target_type_maybe, type):\n if issubclass(target_type_maybe, Role):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_ROLE\n elif issubclass(target_type_maybe, ClientUserBase):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_USER\n elif issubclass(target_type_maybe, Channel):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_CHANNEL\n else:\n target_lookup_failed = True\n break\n \n elif isinstance(target_type_maybe, str):\n if target_type_maybe in ('Role', 'role'):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_ROLE\n elif target_type_maybe in ('User', 'user'):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_USER\n elif target_type_maybe in ('Channel', 'channel'):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_CHANNEL\n else:\n target_lookup_failed = True\n break\n \n elif isinstance(target_type_maybe, ApplicationCommandPermissionOverwriteTargetType):\n target_type = target_type_maybe\n \n elif isinstance(target_type_maybe, ApplicationCommandPermissionOverwriteTargetType.VALUE_TYPE):\n target_type = ApplicationCommandPermissionOverwriteTargetType.get(target_type_maybe)\n \n else:\n target_lookup_failed = True\n break\n \n if type(target_id_maybe) is int:\n target_id = target_id_maybe\n elif isinstance(target_id_maybe, int):\n target_id = int(target_id_maybe)\n else:\n target_lookup_failed = True\n break\n \n target_lookup_failed = False\n break\n \n target_lookup_failed = True\n break\n \n if target_lookup_failed:\n raise TypeError(\n f'`target` can be `{Role.__name__}`, `{ClientUserBase.__name__}`, `{Channel.__name__}`, '\n f'`tuple` ((`{Role.__name__}`, `{ClientUserBase.__name__}`, `{Channel.__name__}`, `str` '\n f'(`\\'Role\\'`, `\\'role\\'`, `\\'User\\'`, `\\'user\\'`, `\\'Channel\\'`, `\\'channel\\'`)), `int`), '\n f'got {target.__class__.__name__}: {target!r}.'\n )\n \n return target_type, target_id",
"def can_create_relationships(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_create_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def check_for_delegation(record):\n try:\n if not record.domain.delegated:\n return\n except ObjectDoesNotExist:\n return\n if not record.pk: # We don't exist yet.\n raise ValidationError(\"No objects can be created in the {0}\"\n \"domain. It is delegated.\"\n .format(record.domain.name))",
"def can_target(name):\n return False",
"def test_validate_delegate_no_limit(self):\n self.make_assignment(self.project, self.user_bob, self.role_delegate)\n try:\n self.make_assignment(\n self.project, self.user_carol, self.role_delegate\n )\n except ValidationError as e:\n self.fail(e)",
"def delegations(self) -> Optional[Sequence['outputs.AssessmentDelegation']]:\n return pulumi.get(self, \"delegations\")",
"def can_delete_relationships(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_delete_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def allow_child (self, name, user, obj, ** kw) :\n try :\n p = self.child_permission_map [name]\n except KeyError :\n return True\n else :\n return p.instance (user, self, obj = obj, ** kw)",
"def test_create_role_binding_restriction_for_all_namespaces(self):\n pass",
"def test_create_namespaced_role_binding_restriction(self):\n pass",
"def test_read_namespaced_role_binding_restriction(self):\n pass",
"def can_update_relationships(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_update_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def _check_permissions(source: Any, info: Info, kwargs: Dict[str, Any]):\n for permission_class in self.permission_classes:\n permission = permission_class()\n\n if not permission.has_permission(source, info, **kwargs):\n message = getattr(permission, \"message\", None)\n raise PermissionError(message)",
"def list_delegations(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n filter_ = kwargs.get(\"filter\", \"all\")\n\n attributes = ALL if verbose else [\"sAMAccountName\", \"userAccountControl\"]\n\n if filter_ == \"all\":\n if not verbose:\n attributes.extend([\n \"msDS-AllowedToDelegateTo\",\n \"msDS-AllowedToActOnBehalfOfOtherIdentity\"\n ])\n entries = self.engine.query(self.engine.ALL_DELEGATIONS_FILTER(), attributes)\n elif filter_ == \"unconstrained\":\n entries = self.engine.query(self.engine.UNCONSTRAINED_DELEGATION_FILTER(), attributes)\n elif filter_ == \"constrained\":\n if not verbose:\n attributes.append(\"msDS-AllowedToDelegateTo\")\n entries = self.engine.query(self.engine.CONSTRAINED_DELEGATION_FILTER(), attributes)\n elif filter_ == \"rbcd\":\n if not verbose:\n attributes.append(\"msDS-AllowedToActOnBehalfOfOtherIdentity\")\n entries = self.engine.query(self.engine.RESOURCE_BASED_CONSTRAINED_DELEGATION_FILTER(), attributes)\n else:\n return None\n\n if verbose:\n self.display(entries, verbose)\n else:\n for entry in entries:\n try:\n uac = entry[\"userAccountControl\"]\n sam = entry[\"sAMAccountName\"]\n delegate = entry.get(\"msDS-AllowedToDelegateTo\")\n allowed_to_act = entry.get(\"msDS-AllowedToActOnBehalfOfOtherIdentity\")\n if (filter_ == \"unconstrained\" or filter_ == \"all\") and \"TRUSTED_FOR_DELEGATION\" in uac:\n print(f\"{sam}:unconstrained:\")\n if (filter_ == \"constrained\" or filter_ == \"all\") and delegate:\n transition = \"with\" if \"TRUSTED_TO_AUTH_FOR_DELEGATION\" in uac else \"without\"\n for a in delegate:\n print(f\"{sam}:constrained {transition} protocol transition:{a}\")\n if (filter_ == \"rbcd\" or filter_ == \"all\") and allowed_to_act:\n sd = parse_ntSecurityDescriptor(allowed_to_act)\n for ace in sd['DACL']['ACEs']:\n try:\n sid = ace.get('SID')\n if not sid:\n continue\n res = self.engine.resolve_sid(sid)\n name = res[0]['sAMAccountName']\n print(f\"{name}:rbcd:{sam}\")\n except Exception:\n print(f\"{sid}:rbcd:{sam}\")\n except Exception:\n continue",
"def test_delete_namespaced_role_binding_restriction(self):\n pass",
"def test_verify_recursive_and_transverse_acl_options():",
"def delegations(self) -> Optional[Sequence['outputs.DelegationResponse']]:\n return pulumi.get(self, \"delegations\")",
"def can_lookup_relationships(self):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.can_lookup_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def test_list_namespaced_role_binding_restriction(self):\n pass",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)"
] |
[
"0.628052",
"0.57578766",
"0.55259514",
"0.54520214",
"0.5116347",
"0.4986853",
"0.49348375",
"0.49187204",
"0.49093932",
"0.48896667",
"0.48627713",
"0.48561046",
"0.48271003",
"0.48104677",
"0.47576022",
"0.47299388",
"0.4701976",
"0.46875975",
"0.4680922",
"0.46689802",
"0.4664111",
"0.4638099",
"0.4617703",
"0.46176022",
"0.46061862",
"0.45980072",
"0.45857006",
"0.45618463",
"0.45580012",
"0.4542403"
] |
0.86525065
|
0
|
Determine whether a list of paths is consistent with their alleged path hash prefixes. By default, the SHA256 hash function will be used.
|
def _paths_are_consistent_with_hash_prefixes(self, paths,
path_hash_prefixes):
# Assume that 'paths' and 'path_hash_prefixes' are inconsistent until
# proven otherwise.
consistent = False
if len(paths) > 0 and len(path_hash_prefixes) > 0:
for path in paths:
path_hash = self._get_target_hash(path)
# Assume that every path is inconsistent until proven otherwise.
consistent = False
for path_hash_prefix in path_hash_prefixes:
if path_hash.startswith(path_hash_prefix):
consistent = True
break
# This path has no matching path_hash_prefix. Stop looking further.
if not consistent: break
return consistent
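
# --- Illustrative, self-contained sketch (not part of the TUF source above) ---
# The same consistency rule as a standalone helper. It assumes the target hash
# is simply the SHA256 hex digest of the path string, mirroring what
# self._get_target_hash is described as doing.
import hashlib

def paths_consistent_with_prefixes(paths, path_hash_prefixes):
    # Both lists must be non-empty, and every path's hash must start with at
    # least one of the allowed prefixes.
    if not paths or not path_hash_prefixes:
        return False
    for path in paths:
        path_hash = hashlib.sha256(path.encode('utf-8')).hexdigest()
        if not any(path_hash.startswith(prefix) for prefix in path_hash_prefixes):
            return False
    return True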
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def checkIntersections(path_list):\n som = 0\n joined_list = [hash(i) for i in list(itertools.chain.from_iterable(path_list))] # lelijk\n occurrences = np.bincount(joined_list)\n for i in occurrences:\n if i > 1:\n som += i\n return som",
"def CheckHashes(self, hashes):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha256\"):\n # The canonical name of the file is where we store the file hash.\n digest = hsh.sha256\n hash_map[aff4.ROOT_URN.Add(\"files/hash/generic/sha256\").Add(\n str(digest))] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def has_hash(self, h):\n rsp = h.hashlist(self.path)\n if re.search(\"\\n[0-9a-f]+\\smd5\\s%s\" % self.path, rsp):\n rval = True\n else:\n rval = False\n return rval",
"def consistent(self):\n return self.incore_digest == self.ondisk_digest",
"def hash_files_or_dirs(paths: List[str]) -> str:\n md5 = hashlib.md5()\n for path in sorted(paths):\n md5 = _hash_file_or_dir(path, md5)\n return md5.hexdigest()",
"def CheckHashes(self, hashes, unused_external=True):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha1\"):\n digest = hsh.sha1\n hash_urn = self.PATH.Add(str(digest))\n logging.info(\"Checking URN %s\", str(hash_urn))\n hash_map[hash_urn] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def compute_fingerprint(path_list):\r\n\r\n hasher = hashlib.sha1()\r\n\r\n for path in path_list:\r\n\r\n # For directories, create a hash based on the modification times\r\n # of first-level subdirectories\r\n if os.path.isdir(path):\r\n for dirname in sorted(os.listdir(path)):\r\n p = os.path.join(path, dirname)\r\n if os.path.isdir(p):\r\n hasher.update(str(os.stat(p).st_mtime))\r\n\r\n # For files, hash the contents of the file\r\n if os.path.isfile(path):\r\n with open(path, \"rb\") as file_handle:\r\n hasher.update(file_handle.read())\r\n\r\n return hasher.hexdigest()",
"def compute_fingerprint(path_list):\n\n hasher = hashlib.sha1()\n\n for path_item in path_list:\n\n # For directories, create a hash based on the modification times\n # of first-level subdirectories\n if os.path.isdir(path_item):\n for dirname in sorted(os.listdir(path_item)):\n path_name = os.path.join(path_item, dirname)\n if os.path.isdir(path_name):\n hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8'))\n\n # For files, hash the contents of the file\n if os.path.isfile(path_item):\n with open(path_item, \"rb\") as file_handle:\n hasher.update(file_handle.read())\n\n return hasher.hexdigest()",
"def verify_path(path, leaf, root, algo='sha256'):\n last_parent = leaf\n assert leaf in path[0][:1]\n address = ''\n for left, right, parent in path:\n assert do_hash(left + right, algo) == parent\n if last_parent == left:\n address = \"0\" + address\n elif last_parent == right:\n address = \"1\" + address\n else:\n assert False\n last_parent = parent\n\n assert last_parent == root\n return address",
"def test_get_cheap_hash(get_all_structures):\n comp_matrix = np.zeros((len(get_all_structures), len(get_all_structures)))\n for i, structure_a in enumerate(get_all_structures):\n for j, structure_b in enumerate(get_all_structures):\n if i < j:\n hash_a = get_cheap_hash(structure_a)\n hash_b = get_cheap_hash(structure_b)\n if hash_a == hash_b:\n comp_matrix[i][j] = 1\n else:\n comp_matrix[i][j] = 0\n assert sum(comp_matrix) == sum(np.diag(comp_matrix))",
"def _verify_hashes(hashes):\n\n for item in hashes:\n try:\n hashlib.new(item)\n VALID_HASH.append(item)\n except Exception:\n pass",
"def __hash__(self):\n return hash(self._full_path)",
"def compare(self, checksum):\n real_checksum = checksum\n if len(checksum) > self.hasher_size:\n real_checksum = checksum[0:self.hasher_size]\n afile = checksum[self.hasher_size:len(checksum)]\n self.path = os.path.join(self.path, afile)\n self.compute()\n return self.real_checksum == real_checksum",
"def test_hash(self):\n ffs = get_available_force_fields()\n\n for ff1, ff2 in itertools.combinations(ffs, 2):\n assert hash(ff1) != hash(ff2)",
"def test_find_simple_conflicting_paths(self):\n ruleset_a = [\n Rule(priority=9, table=0,\n match=Match([('IPV4_DST', 1, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=0, table=0)\n ]\n\n ruleset_b = [\n Rule(priority=9, table=0,\n match=Match([('IPV4_DST', 1, None)])),\n Rule(priority=0, table=0)\n ]\n\n ruleset_c = [\n Rule(priority=0, table=0)\n ]\n # Expected results\n result_ab = {\n (ruleset_a[0],): frozenset([(ruleset_b[0],)])\n }\n result_ba = {\n (ruleset_b[0],): frozenset([(ruleset_a[0],)])\n }\n result_ac = {\n (ruleset_a[0],): frozenset([(ruleset_c[0],)])\n }\n result_ca = {\n (ruleset_c[0],): frozenset([(ruleset_a[0],)])\n }\n single_a = to_single_table(ruleset_a)\n single_b = to_single_table(ruleset_b)\n single_c = to_single_table(ruleset_c)\n norm_a = normalise(single_a)\n norm_b = normalise(single_b)\n norm_c = normalise(single_c)\n equal_ab, diff_ab = check_equal(norm_a, norm_b, diff=True)\n self.assertFalse(equal_ab)\n equal_ac, diff_ac = check_equal(norm_a, norm_b, diff=True)\n self.assertFalse(equal_ac)\n self.assertTrue(check_equal(norm_b, norm_c))\n\n paths_ab = find_conflicting_paths(diff_ab, single_a, single_b)\n self.assertEqual(paths_ab, result_ab)\n self.assertNotEqual(paths_ab, result_ba) # Sanity check\n\n paths_ba = find_conflicting_paths(diff_ab, single_b, single_a)\n self.assertEqual(paths_ba, result_ba)\n self.assertNotEqual(paths_ba, result_ab) # Sanity check\n\n paths_ca = find_conflicting_paths(diff_ac, single_c, single_a)\n self.assertEqual(paths_ca, result_ca)\n\n paths_ac = find_conflicting_paths(diff_ac, single_a, single_c)\n self.assertEqual(paths_ac, result_ac)",
"def _calculate_hash(files: Iterable[str], root: str) -> str:\n file_hash = hashlib.md5()\n for file_name in sorted(files):\n file_path = os.path.join(root, file_name)\n file_hash.update((file_name + \"\\0\").encode())\n with open(file_path, \"rb\") as file_:\n # pylint: disable=cell-var-from-loop\n for chunk in iter(lambda: file_.read(4096), \"\"):\n if not chunk:\n break\n file_hash.update(chunk)\n file_hash.update(\"\\0\".encode())\n\n return file_hash.hexdigest()",
"def hash_all(strs, digest=None):\r\n digest = digest or hashlib.sha1()\r\n for s in strs:\r\n digest.update(s)\r\n return digest.hexdigest()",
"def test_get_hash(get_all_structures):\n comp_matrix = np.zeros((len(get_all_structures), len(get_all_structures)))\n for i, structure_a in enumerate(get_all_structures):\n for j, structure_b in enumerate(get_all_structures):\n if i < j:\n hash_a = get_hash(structure_a)\n hash_b = get_hash(structure_b)\n if hash_a == hash_b:\n comp_matrix[i][j] = 1\n else:\n comp_matrix[i][j] = 0\n assert sum(comp_matrix) == sum(np.diag(comp_matrix))",
"def assertEqualPathsList(first: Iterable[str], second: Iterable[str]) -> None: # pragma: no cover\n if any(isPass(path) for path in first):\n return\n if any(isPass(path) for path in second):\n return\n for fpath in first:\n assert any(fnmatch.fnmatch(fpath, spath) for spath in second)\n for spath in second:\n assert any(fnmatch.fnmatch(fpath, spath) for fpath in first)",
"def correct_checksum():\n test_strs = [\"ch3ck1nG c0rr3ct ch3cksu|\\/|\\n\", \"y3T an0th3r str1ng0_x\\/.!&\\n\"]\n\n def test_checksum(test_str):\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n ref_checksum = ref_segment.checksum\n\n # Check the first sent segment.\n segment = segments[0]\n\n # Checksum equal to the reference checksum.\n if segment.checksum == ref_checksum:\n return True\n\n # Maybe they also set an ACK for this segment. Compare with the computed\n # checksum.\n return int(segment.checksum, 16) == segment.c_repr.cksum;\n\n return reduce(lambda a, b: a and b, [test_checksum(t) for t in test_strs])",
"def _calculate_link_hash(links):\n to_hash = ''.join(sorted(links.keys()))\n # Hashlib takes encoded Strings, not Unicode objects\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()",
"def __hash__(self):\n return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)",
"def hash_all(strs: typing.Iterable[bytes | str]) -> str:\n digest = hashlib.sha1()\n for s in strs:\n s = ensure_binary(s)\n digest.update(s)\n return digest.hexdigest()",
"def comparison(test_hashes):\n # neat little bit of python which fills an array with the passwords form 0000 to 9999\n hashes = ['{0:04}'.format(num) for num in range(0, 9999)]\n\n hashed = []\n\n # convert the above list to the format matching the input\n for i in range(0, len(hashes)):\n hashed.append(str(hash_comparing(hashes[i]).hexdigest().upper()))\n\n # linear search\n found = []\n for j in range(0, len(hashed)):\n if hashed[j] in test_hashes:\n found.append(hashes[j])\n\n # neat formatting to get the passwords back\n for i in range(0, len(found)):\n next_print = str(found[i])\n while len(next_print) < 0:\n next_print = '0' + next_print\n print(next_print)\n\n return found",
"def is_hash(fhash):\n\n # Intentionally doing if/else statement for ease of testing and reading\n if re.match(re_md5, fhash):\n return True\n elif re.match(re_sha1, fhash):\n return True\n elif re.match(re_sha256, fhash):\n return True\n elif re.match(re_sha512, fhash):\n return True\n elif re.match(re_ssdeep, fhash):\n return True\n else:\n return False",
"def check_md5sum(file1: str, file2: str) -> bool:\n return get_md5_hash(file1) == get_md5_hash(file2)",
"def path_touched(*paths, commit_range):\n return check_output([\n 'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''",
"def get_md5_of_unordered_fileset(list_of_filepaths):\n hashes = [get_md5(filepath) for filepath in list_of_filepaths]\n return hashlib.md5(\";\".join(sorted(hashes)).encode('utf-8')).hexdigest()",
"def test_12_multihash(self):\n self.base_12_multihash(\"sha256\")\n if sha512_supported:\n self.base_12_multihash(\"sha512_256\")",
"def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid"
] |
[
"0.6460765",
"0.60971636",
"0.60522926",
"0.60324067",
"0.60068655",
"0.5927751",
"0.58884984",
"0.5864461",
"0.57985765",
"0.5792949",
"0.57527",
"0.57444113",
"0.57175684",
"0.5711271",
"0.57033604",
"0.5662781",
"0.56612056",
"0.56524724",
"0.5587995",
"0.5470321",
"0.546591",
"0.5444754",
"0.5411526",
"0.5383571",
"0.53801185",
"0.5369662",
"0.536662",
"0.5352043",
"0.5320449",
"0.5316852"
] |
0.80761313
|
0
|
Determine whether the current fileinfo of 'metadata_filename' differs from 'new_fileinfo'. The 'new_fileinfo' argument should be extracted from the latest copy of the metadata.
|
def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):
# If there is no fileinfo currently stored for 'metadata_filename',
# try to load the file, calculate the fileinfo, and store it.
if metadata_filename not in self.fileinfo:
self._update_fileinfo(metadata_filename)
# Return true if there is no fileinfo for 'metadata_filename'.
# 'metadata_filename' is not in the 'self.fileinfo' store
# and it doesn't exist in the 'current' metadata location.
if self.fileinfo.get(metadata_filename) is None:
return True
current_fileinfo = self.fileinfo[metadata_filename]
if current_fileinfo['length'] != new_fileinfo['length']:
return True
# Now compare hashes. Note that the reason we can't just do a simple
# equality check on the fileinfo dicts is that we want to support the
# case where the hash algorithms listed in the metadata have changed
# without having that result in considering all files as needing to be
# updated, or not all hash algorithms listed can be calculated on the
# specific client.
for algorithm, hash_value in new_fileinfo['hashes'].items():
# We're only looking for a single match. This isn't a security
# check, we just want to prevent unnecessary downloads.
if hash_value == current_fileinfo['hashes'][algorithm]:
return False
return True
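
# --- Minimal standalone sketch (illustrative, not the Updater API) ---
# The same "has changed" rule applied to two plain fileinfo dicts of the form
# {'length': <int>, 'hashes': {<algorithm>: <hex digest>, ...}}.
def fileinfo_has_changed(current_fileinfo, new_fileinfo):
    if current_fileinfo is None:
        # Nothing is stored locally, so treat the metadata as changed.
        return True
    if current_fileinfo['length'] != new_fileinfo['length']:
        return True
    # A single matching hash, for any algorithm both sides list, is enough to
    # treat the file as unchanged; this is a download optimization, not a
    # security check.
    for algorithm, hash_value in new_fileinfo['hashes'].items():
        if current_fileinfo['hashes'].get(algorithm) == hash_value:
            return False
    return True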
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _update_fileinfo(self, metadata_filename):\n \n # In case we delayed loading the metadata and didn't do it in\n # __init__ (such as with delegated metadata), then get the file\n # info now.\n \n # Save the path to the current metadata file for 'metadata_filename'.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n # If the path is invalid, simply return and leave fileinfo unset.\n if not os.path.exists(current_filepath):\n self.fileinfo[current_filepath] = None\n return\n \n # Extract the file information from the actual file and save it\n # to the fileinfo store.\n file_length, hashes = tuf.util.get_file_details(current_filepath)\n metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)\n self.fileinfo[metadata_filename] = metadata_fileinfo",
"def test_new_file_diff(self):\n diff = (\n b'diff --git a/IAMNEW b/IAMNEW\\n'\n b'new file mode 100644\\n'\n b'index 0000000..e69de29\\n'\n b'--- /dev/null\\n'\n b'+++ b/IAMNEW\\n'\n b'@@ -0,0 +1,1 @@\\n'\n b'+Hello\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'IAMNEW',\n orig_file_details=PRE_CREATION,\n modified_filename=b'IAMNEW',\n modified_file_details=b'e69de29',\n new_unix_mode='100644',\n insert_count=1,\n data=diff)",
"def check_dataset_old_metadata_location(**_):\n old_metadata = get_pre_0_3_4_datasets_metadata()\n\n if not old_metadata:\n return True, False, None\n\n problems = (\n WARNING + \"There are metadata files in the old location.\"\n '\\n (use \"renku migrate\" to move them)\\n\\n\\t'\n + \"\\n\\t\".join(click.style(str(path.relative_to(project_context.path)), fg=\"yellow\") for path in old_metadata)\n + \"\\n\"\n )\n\n return False, False, problems",
"def has_changed(self):\n timestamp = os.stat(self.filename).st_mtime\n if timestamp > self.last_timestamp:\n self.last_timestamp = timestamp\n return True\n return False",
"def _is_tracked(filename, metadata):\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha",
"def file_is_modified(filename, lastupdate):\n now = datetime.datetime.utcnow()\n update = file_get_mdatetime(filename)\n return now >= update and update >= lastupdate",
"def _is_remote_file_different(local_file, remote_file, ftp_connection, fatal_if_nonexistant=False, local_must_be_newer=False):\n # Check for an error, if the error is that the file does not exist. By default, if the remote file does not exist,\n # assume that means that it needs to be uploaded. However, if fatal_if_nonexistant is True, then raise an exception.\n try:\n remote_size, remote_mtime = _remote_file_size_modtime(ftp_connection, remote_file)\n except error_perm: # I'm assuming that error_perm is only raised if the file doesn't exist, which is probably incorrect, but I have no way to test if you don't have permission to access the file\n if not fatal_if_nonexistant:\n return False\n else:\n raise\n\n local_size, local_mtime = _local_file_size_modtime(local_file)\n # We need to remove the sub-second components of the local mtime, because it is not required of the FTP MDTM command\n # that we use to get the remote time that it include smaller time resolution than seconds.\n local_mtime = local_mtime.replace(microsecond=0)\n \n if local_must_be_newer:\n return local_mtime > remote_mtime or local_size != remote_size\n else:\n return local_mtime != remote_mtime or local_size != remote_size",
"def test_new_file_no_content_diff(self):\n diff = (\n b'diff --git a/newfile b/newfile\\n'\n b'new file mode 100644\\n'\n b'index 0000000..e69de29\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'newfile',\n orig_file_details=PRE_CREATION,\n modified_filename=b'newfile',\n modified_file_details=b'e69de29',\n new_unix_mode='100644',\n data=diff)",
"def is_new_file(self):\n return self.filename is None",
"def is_newer(filename1, filename2):\n return os.stat(filename1).st_mtime > os.stat(filename2).st_mtime",
"def is_outdated(self):\n\n if not self.is_done:\n return False\n elif not (self.input_files and self.output_files):\n return False\n\n return fileutils.modified_after(self.input_files, self.output_files)",
"def is_match(self, old_log_file, new_log_file):\n Gumtree.gumtree.setOldAndNewFile(old_log_file, new_log_file)\n return Gumtree.gumtree.isMatch()",
"def is_more_rencent(filename: str, comparison_filename: str):\n return os.path.getmtime(filename) > os.path.getmtime(comparison_filename)",
"def IsFileNewer(name1, name2):\n\n\tif not os.path.exists(name1):\n\t\treturn 0\n\n\tif not os.path.exists(name2):\n\t\treturn 1\n\n\tmod_time1 = os.stat(name1)[stat.ST_MTIME]\n\tmod_time2 = os.stat(name2)[stat.ST_MTIME]\n\treturn (mod_time1 > mod_time2)",
"def test_update_meta_file_meta_file_exists(self):\n # Expected results\n date_old = '2021-09-13'\n date_new = '2021-09-14'\n dates_expected = [date_old, date_new]\n\n meta_content = f\"{self.meta_date_col},{self.meta_timestamp_col}\\n{date_old},{datetime.today().strftime(self.meta_timestamp_format)}\"\n self.bucket.put_object(Body=meta_content, Key=self.meta_key)\n # method execution\n MetaFile.update_meta_file(date_new, self.trg_bucket_connector)\n # read meta file\n df_result = self.trg_bucket_connector.read_meta_file()\n dates_result = list(df_result[\n self.meta_date_col])\n self.assertEqual(dates_expected, dates_result)",
"def DiffResults(marker, new_results, old_results, diff_results, strip_reason):\n old_file = open(old_results, \"r\")\n new_file = open(new_results, \"r\")\n diff_file = open(diff_results, \"a\") \n\n # Read lines from each file\n ndict = new_file.readlines()\n cdict = old_file.readlines()\n\n # Write marker to diff file\n diff_file.writelines(marker + \"\\n\")\n diff_file.writelines(\"###############\\n\")\n\n # Strip reason from result lines\n if strip_reason is True:\n for i in range(0, len(ndict)):\n ndict[i] = ndict[i].split(' ')[0] + \"\\n\"\n for i in range(0, len(cdict)):\n cdict[i] = cdict[i].split(' ')[0] + \"\\n\"\n\n # Find results in new_results missing in old_results\n new_count=0\n for line in ndict:\n if line not in cdict:\n diff_file.writelines(\"+ \" + line)\n new_count += 1\n\n # Find results in old_results missing in new_results\n missing_count=0\n for line in cdict:\n if line not in ndict:\n diff_file.writelines(\"- \" + line)\n missing_count += 1\n\n logging.info(marker + \" >>> \" + str(new_count) + \" new, \" + str(missing_count) + \" misses\")\n\n diff_file.writelines(\"\\n\\n\")\n\n old_file.close()\n new_file.close()\n diff_file.close()\n return",
"def original_modified(self):\n if self.modified > self.created:\n return True\n else:\n return False",
"def file_newer(check_file: str, base_file: str) -> bool:\n if os.path.isfile(check_file):\n cf_modtime_ts = os.path.getmtime(check_file)\n bf_modtime_ts = os.path.getmtime(base_file)\n else:\n return False\n\n return cf_modtime_ts > bf_modtime_ts",
"def newer (source, target):\r\n\r\n if not os.path.exists (target):\r\n return 1\r\n\r\n from stat import ST_MTIME\r\n mtime1 = os.stat(source)[ST_MTIME]\r\n mtime2 = os.stat(target)[ST_MTIME]\r\n\r\n return mtime1 > mtime2",
"def test_new_file_no_content_with_following_diff(self):\n diff1 = (\n b'diff --git a/newfile b/newfile\\n'\n b'new file mode 100644\\n'\n b'index 0000000..e69de29\\n'\n )\n diff2 = (\n b'diff --git a/cfg/testcase.ini b/cfg/testcase.ini\\n'\n b'index cc18ec8..5e70b73 100644\\n'\n b'--- a/cfg/testcase.ini\\n'\n b'+++ b/cfg/testcase.ini\\n'\n b'@@ -1,6 +1,7 @@\\n'\n b'+blah blah blah\\n'\n b' [mysql]\\n'\n b' host = localhost\\n'\n b' port = 3306\\n'\n b' user = user\\n'\n b' pass = pass\\n'\n b'-db = pyunit\\n'\n b'+db = pyunit\\n'\n )\n diff = diff1 + diff2\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 2)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'newfile',\n orig_file_details=PRE_CREATION,\n modified_filename=b'newfile',\n modified_file_details=b'e69de29',\n new_unix_mode='100644',\n data=diff1)\n\n self.assert_parsed_diff_file(\n parsed_files[1],\n orig_filename=b'cfg/testcase.ini',\n orig_file_details=b'cc18ec8',\n modified_filename=b'cfg/testcase.ini',\n modified_file_details=b'5e70b73',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=2,\n delete_count=1,\n data=diff2)",
"def _file_newer(cls, path, check_mtime):\n path_mtime = os.path.getmtime(path)\n return path_mtime > check_mtime",
"def newer(source, target):\n if not os.path.exists(source):\n raise DistutilsFileError(\"file '%s' does not exist\" %\n os.path.abspath(source))\n if not os.path.exists(target):\n return True\n\n return os.stat(source).st_mtime > os.stat(target).st_mtime",
"def hasChanged(self):\n return ((self.mtime != getmtime(self.path)) or\n (self.size != os.path.getsize(self.path)) )",
"def write_diff(new_data, old_data, path, file_new, file_old):\n count = []\n REG = r'^[2-9][0-9]\\.[0-9]{2}\\:'\n for new, old in zip(new_data, old_data):\n if re.match(REG, new):\n if new != old:\n count.append(new)\n with open(path, 'w', encoding='utf-8') as f:\n f.write('Difference was found in ' + str(len(count)) + ' params.\\n')\n f.write(' | ' + file_new[-16:-8] + ' | ' + new)\n f.write(' | ' + file_old[-16:-8] + ' | ' + old + '\\n')",
"def is_new_transaction_log_file(self):\n return self.file_type() == FileType.FILE_TYPE_LOG_NEW",
"def _newer(a: str, b: str) -> bool:\n if not os.path.exists(a):\n return False\n if not os.path.exists(b):\n return True\n return os.path.getmtime(a) >= os.path.getmtime(b)",
"def test_is_generated_map_different__just_old(self) -> None:\n with open(self._old_file, 'w') as f:\n json.dump({}, f)\n self.assertFalse(os.path.isfile(self._gen_file))\n gen = generate.GenerateDataImpl(self._config)\n res = gen.is_generated_map_different()\n self.assertFalse(res)",
"def test_is_generated_map_different__just_new(self) -> None:\n with open(self._gen_file, 'w') as f:\n json.dump({}, f)\n self.assertFalse(os.path.isfile(self._old_file))\n gen = generate.GenerateDataImpl(self._config)\n res = gen.is_generated_map_different()\n self.assertTrue(res)",
"def is_old_transaction_log_file(self):\n return (self.file_type() == FileType.FILE_TYPE_LOG_OLD_1) or (self.file_type() == FileType.FILE_TYPE_LOG_OLD_2)",
"def __ne__(self, other):\n if not isinstance(other, SharedFileMetadata):\n return True\n\n return self.to_dict() != other.to_dict()"
] |
[
"0.6610963",
"0.64198935",
"0.615069",
"0.61243784",
"0.5961765",
"0.59265333",
"0.590873",
"0.59039855",
"0.5862563",
"0.586194",
"0.5858841",
"0.57861775",
"0.5778181",
"0.57122946",
"0.56918406",
"0.56588155",
"0.5657629",
"0.56488836",
"0.5645154",
"0.5627097",
"0.56251556",
"0.5602249",
"0.5558019",
"0.5557618",
"0.5555339",
"0.5545077",
"0.5530682",
"0.5516261",
"0.5512301",
"0.55101174"
] |
0.8372806
|
0
|
Update the 'self.fileinfo' entry for the metadata belonging to 'metadata_filename'. If the 'current' metadata for 'metadata_filename' cannot be loaded, set its 'fileinfo' to 'None' to signal that it is not in 'self.fileinfo' and it also doesn't exist locally.
|
def _update_fileinfo(self, metadata_filename):
# In case we delayed loading the metadata and didn't do it in
# __init__ (such as with delegated metadata), then get the file
# info now.
# Save the path to the current metadata file for 'metadata_filename'.
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
# If the path is invalid, simply return and leave fileinfo unset.
if not os.path.exists(current_filepath):
self.fileinfo[current_filepath] = None
return
# Extract the file information from the actual file and save it
# to the fileinfo store.
file_length, hashes = tuf.util.get_file_details(current_filepath)
metadata_fileinfo = tuf.formats.make_fileinfo(file_length, hashes)
self.fileinfo[metadata_filename] = metadata_fileinfo
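
# --- Rough standalone equivalent (an assumption about the helpers used above) ---
# tuf.util.get_file_details and tuf.formats.make_fileinfo are assumed to boil
# down to "file length plus a dict of hex digests keyed by hash algorithm".
import hashlib
import os

def make_local_fileinfo(filepath, algorithms=('sha256',)):
    if not os.path.exists(filepath):
        # Mirrors storing None when the 'current' metadata file is absent.
        return None
    with open(filepath, 'rb') as f:
        data = f.read()
    hashes = {alg: hashlib.new(alg, data).hexdigest() for alg in algorithms}
    return {'length': len(data), 'hashes': hashes}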
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_metadata(self, file_id, metadata):\n pass",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True",
"def metadata_update(self, new_metadata=None):\n if new_metadata is None:\n self.metadata_set(self.t.metadata())",
"def process_metadata(self, metadata, allow_empty):\n\n # make sure the metadata is not null nor empty\n url = self.get_url_from_metadata(metadata)\n if url != None:\n # don't signal if we have already signalled this file\n if url != self.last_url:\n artist = metadata.lookup_value('xesam:artist')[0]\n title = metadata.lookup_value('xesam:title').get_string()\n\n self.last_url = url\n self.signal_metadata(url, artist, title)\n\n # in case when the script has just started, we need to signal\n # even if there is no file\n elif allow_empty:\n self.signal_metadata(None)",
"def update_metadata(self, metadata: t.Mapping[str, str]) -> None:\n self._metadata.update(metadata)",
"def metadata_update(self, _):\n self.details.original_widget = YesNoWidget('Update metadata files?', self.__metadata_update)",
"def _update_extra_metadata(self, extra_metadata):\n self._add_filename_metadata(extra_metadata)\n self._derive_extra_metadata(extra_metadata)\n \n if type(self) == SAFESentinel3:\n self._extract_metadata_from_zipfile(extra_metadata)",
"def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)",
"def update_metadata(self, metadata):\n return self.manager.update_metadata(self, metadata)",
"def __metadata_update(self, value):\n if value:\n try:\n self.details.original_widget = TextWidget('Updating metadata files. Please, wait...')\n self.execution_manager.close()\n except QMapError as e:\n self.details.original_widget = TextWidget(e)\n self.__back_to_main()",
"def updateFileInfo(self, data, pid):\n self.db.updateLinkInfo(data)\n self.evm.dispatchEvent(\"packageUpdated\", pid)",
"def update_metadata(self, metadata):\n return self.parent.update_metadata_for_node(self, metadata)",
"def update_mp3_metadata(self, mp3_file):\n if isinstance(mp3_file, str):\n mp3_file = mp3_utility.Mp3File(file_path=mp3_file, load_tags_from_file=True)\n remote_name = self.get_remote_name(mp3_file.file_path)\n archive_item_file_details = self.item_files_dict.get(remote_name, None)\n mp3_metadata = mp3_file.metadata\n if archive_item_file_details is None:\n logging.warning(\"The file does not exist! Skipping.\")\n else:\n remote_tag_update_needed = (archive_item_file_details.get(\"artist\", \"\") != mp3_metadata.artist) or (\n archive_item_file_details.get(\"creator\", \"\") != mp3_metadata.artist) or (\n archive_item_file_details.get(\"title\", \"\") != mp3_metadata.title) or (\n archive_item_file_details.get(\"album\", \"\") != mp3_metadata.album) or (\n archive_item_file_details.get(\"album_artist\",\n \"\") != mp3_metadata.album_artist)\n if remote_tag_update_needed:\n logging.info(\"***Updating %s in archive item.\" % remote_name)\n logging.info(\n internetarchive.modify_metadata(\n self.archive_id,\n metadata=dict(title=mp3_metadata.title, album=mp3_metadata.album,\n album_artist=mp3_metadata.album_artist,\n artist=mp3_metadata.artist, creator=mp3_metadata.artist),\n target=os.path.join(\"files\", remote_name)))",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def _sync_metadata(self, stat):\n self._done = stat.done\n self._all_files_processed = stat.all_files_processed\n self._last_parsing_stat_received_at = time.monotonic()",
"def set_file_metadata(self, metadata=None, timeout=None, **kwargs): # type: ignore\n #type: (Optional[Dict[str, Any]], Optional[int], Optional[Any]) -> Dict[str, Any]\n headers = kwargs.pop('headers', {})\n headers.update(add_metadata_headers(metadata)) # type: ignore\n try:\n return self._client.file.set_metadata( # type: ignore\n timeout=timeout,\n cls=return_response_headers,\n headers=headers,\n metadata=metadata,\n **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)",
"def process_file(self, filepath, only_if_updated=True):\n raise NotImplementedError()",
"def __appendMetaData(self, filename):\n metadata = {'Model': 'LFM',\n 'Source': filename,\n 'Date processed': datetime.datetime.now(),\n 'Start date': self.startDate\n }\n \n self.data.append(key='meta',\n name='Metadata for LFM Solar Wind file',\n units='n/a',\n data=metadata)",
"def update_metadata(self):\n parser = GenericParser(\n fn_re='{}/(e\\d+s\\d+)_.*/Production.nc'.format(self.data_folder),\n group_names=['sim'],\n group_transforms=[lambda x: x],\n top_fn='',\n step_ps=self.timestep\n )\n meta = gather_metadata('{}/e*/*nc'.format(self.data_folder), parser)\n meta['top_fn'] = sorted(glob('{}/e*/structure.prmtop'.format(self.input_folder)))\n self.meta = meta",
"def metadata(self, metadata: Mapping[str, str]):\r\n self._metadata = metadata",
"def metadata(self, metadata: Mapping[str, str]):\r\n self._metadata = metadata",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def metadata(self, metadata):\n\n self._metadata = metadata",
"def metadata(self, metadata):\n\n self._metadata = metadata",
"def metadata(self, metadata):\n\n self._metadata = metadata",
"def metadata(self, metadata):\n\n self._metadata = metadata",
"def metadata(self, metadata):\n\n self._metadata = metadata",
"def metadata(self, metadata):\n\n self._metadata = metadata"
] |
[
"0.67793924",
"0.66336787",
"0.6279058",
"0.58203685",
"0.58027214",
"0.5740213",
"0.5739799",
"0.5691326",
"0.5655879",
"0.55965215",
"0.5530154",
"0.5512755",
"0.550789",
"0.5398123",
"0.5380825",
"0.5296818",
"0.5247495",
"0.522319",
"0.5214021",
"0.5198123",
"0.5195354",
"0.5112523",
"0.5112523",
"0.5106476",
"0.50883275",
"0.50883275",
"0.50883275",
"0.50883275",
"0.50883275",
"0.50883275"
] |
0.8279816
|
0
|
Move the current metadata file for 'metadata_role' to the previous directory.
|
def _move_current_to_previous(self, metadata_role):
# Get the 'current' and 'previous' full file paths for 'metadata_role'
metadata_filepath = metadata_role + '.txt'
previous_filepath = os.path.join(self.metadata_directory['previous'],
metadata_filepath)
current_filepath = os.path.join(self.metadata_directory['current'],
metadata_filepath)
# Remove the previous path if it exists.
if os.path.exists(previous_filepath):
os.remove(previous_filepath)
# Move the current path to the previous path.
if os.path.exists(current_filepath):
tuf.util.ensure_parent_dir(previous_filepath)
os.rename(current_filepath, previous_filepath)
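
# --- Simple stdlib-only sketch of the same rotation (illustrative) ---
# tuf.util.ensure_parent_dir is assumed to behave like creating the parent
# directory if it does not already exist.
import os

def move_current_to_previous(current_dir, previous_dir, rolename):
    filename = rolename + '.txt'
    current_path = os.path.join(current_dir, filename)
    previous_path = os.path.join(previous_dir, filename)
    if os.path.exists(previous_path):
        os.remove(previous_path)  # drop the stale 'previous' copy first
    if os.path.exists(current_path):
        os.makedirs(os.path.dirname(previous_path), exist_ok=True)
        os.rename(current_path, previous_path)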
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass",
"def _restoreRole(self, oldRole, args):\n if oldRole:\n args['role'] = oldRole\n else:\n del args['role']",
"def move_from_temp_directory(self):",
"def move_to_complete(metadata: Metadata):\n\n func = f\"{__name__}.move_to_complete\"\n\n metadata_updated = get_destination(metadata)\n moved = move(metadata[\"full_clipname\"], metadata_updated[\"destination\"])\n metadata_updated[\"destination\"] = moved\n\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The file was moved from: {metadata_updated['full_clipname']}\",\n )\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The file was moved to: {metadata_updated['destination']}\",\n )\n\n return metadata_updated",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def move_file(self, ctx):\n pass",
"def _load_metadata_from_file(self, metadata_set, metadata_role):\n\n # Ensure we have a valid metadata set.\n if metadata_set not in ['current', 'previous']:\n raise tuf.Error('Invalid metadata set: '+repr(metadata_set))\n\n # Save and construct the full metadata path.\n metadata_directory = self.metadata_directory[metadata_set]\n metadata_filename = metadata_role + '.txt'\n metadata_filepath = os.path.join(metadata_directory, metadata_filename)\n \n # Ensure the metadata path is valid/exists, else ignore the call. \n if os.path.exists(metadata_filepath):\n # Load the file. The loaded object should conform to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n metadata_signable = tuf.util.load_json_file(metadata_filepath)\n\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Extract the 'signed' role object from 'metadata_signable'.\n metadata_object = metadata_signable['signed']\n \n # Save the metadata object to the metadata store.\n self.metadata[metadata_set][metadata_role] = metadata_object\n \n # We need to rebuild the key and role databases if \n # metadata object is 'root' or target metadata.\n if metadata_set == 'current':\n if metadata_role == 'root':\n self._rebuild_key_and_role_db()\n elif metadata_object['_type'] == 'Targets':\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def movedir(self):\n pass",
"def remove_old_persistent_data(self) -> None:\n global persistent_directory # pylint: disable=invalid-name\n path = os.path.join(persistent_directory.value, self.name + \".actions.yaml\")\n if os.path.exists(path):\n Logger.debug(f\"Remove the persistent actions: {path}\")\n os.remove(path)\n\n if \"/\" not in self.name:\n return\n try:\n os.rmdir(os.path.dirname(path))\n except OSError:\n pass",
"def _restore_orig_directory(self):\n if not self._is_temp_dir:\n return\n self._base_data_dir = self._orig_base_data_dir\n del self._orig_base_data_dir\n self._base_logs_dir = self._orig_base_logs_dir\n del self._orig_base_logs_dir\n self.db.change_path(self._base_data_dir / \"projects.db\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = False",
"def previous_directory(self):\r\n prev_dir = Path(self.path_viewer.text()).parent\r\n self.set_new_path(str(prev_dir))",
"def __restoreRole(self, session):\r\n if self.__role:\r\n _logger.info(\"Switching user to role: %s\" % self.__role)\r\n\r\n session.role = self.__role\r\n self.__role = None\r\n _logger.info(\"Switched user to role: %s\" % session.role)",
"def _backup_meta_data(meta_path: Path) -> None:\n meta_path = meta_path.resolve()\n backup_meta_path = meta_path.parent / (meta_path.name + \".bak\")\n i = 0\n while backup_meta_path.exists():\n backup_meta_path = backup_meta_path.with_suffix(\".bak{}\".format(i))\n i += 1\n shutil.copy(str(meta_path), str(backup_meta_path))",
"def move_object_metadata(self, bucket_name, src_object_name, dst_object_name):\n\n return h3lib.move_object_metadata(self._handle, bucket_name, src_object_name, dst_object_name, self._user_id)",
"def removeMeta(self, row, column):\n filePath = self.filesList.selectedItems()[0].text(2)\n metaHeader = (self.metadataList.item(row, 0)).text()\n logging.debug(\"Removing metadata \" + metaHeader + \" from \" + str(filePath))\n self.filesList.removeMeta(filePath, metaHeader, row)",
"def rollback():\n current_timestamp = current()\n previous_timestamp = previous()\n\n if previous_timestamp:\n execute(symlink, *(previous_timestamp, ))\n run('rm -rf %s' % os.path.join(env.releases_dir, current_timestamp))",
"def restore_old_install(self):\n USER.info('%s: Restoring Old Install', self.recipe.name)\n shutil.move(self.back_dir, self.recipe.install_dir)\n pakit.conf.IDB[self.recipe.name] = self.old_entry\n walk_and_link(self.recipe.install_dir, self.recipe.link_dir)",
"def _move_self_to(self, new_dir=None, new_name=None):\n if self.is_downloaded:\n if new_dir and not new_name:\n shutil.move(self._download_path, os.path.join(new_dir, self.download_filename))\n elif new_name and not new_dir:\n shutil.move(self._download_path, os.path.join(self.download_dir, new_name))\n elif new_name and new_dir:\n shutil.move(self._download_path, os.path.join(new_dir, new_name))",
"def move_to(self, path: str) -> None:\n self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path))\n os.rename(self._file_path, self._new_path)\n self._file_was_moved = True",
"def rollback(self):\n self.stream.seek(0)",
"def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)",
"def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")",
"def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)",
"def move_email(self, s3key, directory, date):\r\n dest_key = os.path.join(self.s3_base_dir, directory)\r\n \r\n if date is not None:\r\n dest_key = os.path.join(dest_key, date.strftime(\"%Y%m%d\"), os.path.basename(s3key))\r\n else:\r\n dest_key = os.path.join(dest_key, os.path.basename(s3key))\r\n if dest_key == s3key:\r\n return\r\n dest_key = dest_key.replace('\\\\','/')\r\n #logging.info(\"moving file\", repr(s3key), \"->\", repr(dest_key))\r\n self.client.copy_object(self.s3_bucket, s3key, self.s3_bucket, dest_key)\r\n self.client.delete_object(self.s3_bucket, s3key)",
"def delete_previous_files():\n def delete(root: Path):\n shutil.rmtree(root / 'output', ignore_errors=True)\n for p in root.iterdir():\n if str(p).endswith(('.log', 'jobs.csv', 'csv.lock', '.yaml')):\n p.unlink()\n\n delete(wt_registration_dir)\n delete(mut_registration_dir)",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def step_back(\n self):\n if self.backup != None:\n self.data = self.backup"
] |
[
"0.70138854",
"0.6071331",
"0.56894535",
"0.564957",
"0.5627171",
"0.56169873",
"0.5502331",
"0.55000734",
"0.53757805",
"0.53468674",
"0.52324885",
"0.51942086",
"0.51926607",
"0.51787686",
"0.51284546",
"0.5116568",
"0.51012844",
"0.50922936",
"0.5083969",
"0.5048785",
"0.49898592",
"0.4985296",
"0.4978771",
"0.49663457",
"0.49621373",
"0.49594736",
"0.49542987",
"0.49532074",
"0.4944625",
"0.49313113"
] |
0.8314233
|
0
|
Remove all (current) knowledge of 'metadata_role'. The metadata belonging to 'metadata_role' is removed from the current 'self.metadata' store and from the role database. The 'root.txt' role file is never removed.
|
def _delete_metadata(self, metadata_role):
# The root metadata role is never deleted without a replacement.
if metadata_role == 'root':
return
# Get rid of the current metadata file.
self._move_current_to_previous(metadata_role)
# Remove knowledge of the role.
if metadata_role in self.metadata['current']:
del self.metadata['current'][metadata_role]
tuf.roledb.remove_role(metadata_role)
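
Stripped of the TUF-specific stores, the pattern above is "forget the role everywhere, except for 'root'". Below is a minimal, runnable sketch using plain dicts; the names delete_role, metadata_store and role_db are hypothetical stand-ins, not TUF APIs, and the on-disk move performed by _move_current_to_previous() is intentionally omitted.

# Illustrative sketch only: mirrors the _delete_metadata flow with plain
# dicts so it runs without the TUF package installed.
def delete_role(metadata_store, role_db, metadata_role):
    if metadata_role == 'root':
        return  # 'root' is only ever replaced, never deleted.
    # Drop the in-memory copy of the role's metadata, if present.
    metadata_store.get('current', {}).pop(metadata_role, None)
    # Drop the role from the role database.
    role_db.pop(metadata_role, None)

store = {'current': {'root': {}, 'targets': {}, 'targets/unclaimed': {}}}
roles = {'root': {}, 'targets': {}, 'targets/unclaimed': {}}
delete_role(store, roles, 'targets/unclaimed')
assert 'targets/unclaimed' not in store['current'] and 'targets/unclaimed' not in roles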
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def remove_old_persistent_data(self) -> None:\n global persistent_directory # pylint: disable=invalid-name\n path = os.path.join(persistent_directory.value, self.name + \".actions.yaml\")\n if os.path.exists(path):\n Logger.debug(f\"Remove the persistent actions: {path}\")\n os.remove(path)\n\n if \"/\" not in self.name:\n return\n try:\n os.rmdir(os.path.dirname(path))\n except OSError:\n pass",
"def cleanup_metadata(self, cleanup_metadata):\n\n self._cleanup_metadata = cleanup_metadata",
"def handleAllMetaClear(self, path):\n logging.debug(\"All Metadata removed, clearing the table...\")\n self.metadataList.clear()\n self.metadataList.setRowCount(0)\n self.metadataList.setHorizontalHeaderLabels([\"Metadata Header\", \"Value\"])\n self.fileNotSupported.hide()\n self.changeEnableMenus(self.filesList.getFileObj(path))",
"def teardown(bot):\n bot.remove_cog('RoleManager')",
"def reset(self):\n q.system.fs.removeDirTree(self.metadataPath)\n self.__init__(self.metadataPath,self.root)",
"def test_remove_orphaned_metadata(self):\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini'),\n '[orphaned.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini'),\n '[testdriver.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini'), 'expected: FAIL\\n')\n with self._patch_builtins():\n manifests = load_and_update_manifests(self.finder)\n self.command.remove_orphaned_metadata(manifests)\n self.assertFalse(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini')))",
"def delete_metadata(self, keys=None):\n return self.parent.delete_metadata_for_node(self, keys=keys)",
"def _delete_cache_metadata(self, force_delete_file):\n if force_delete_file:\n self._delete_dirs_datasets_in_cache_dir_except_downloads()\n else:\n msg = 'All metadata files of all datasets will be lost if you proceed! ' + \\\n 'Set both \\'force_delete_file=True\\' and \\'force_delete_metadata=True\\' ' + \\\n 'to proceed with the deletion of dbcollection.json and all metadata files.'\n warnings.warn(msg, UserWarning, stacklevel=2)",
"def _delete_roles(self):\n for role in self.roles:\n role.delete()",
"def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)",
"def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)",
"def remove(self):\n self.remove_file()",
"def delete_server_metadata(self, name):\n raise NotImplementedError",
"def __del__(self):\n shutil.rmtree(self.epub_dir)",
"def remove_extra_metadata(meta: dict) -> None:\n keys = get_extra_metadata_keys()\n remove_keys(data=meta, keys=keys)",
"def removeMeta(self, row, column):\n filePath = self.filesList.selectedItems()[0].text(2)\n metaHeader = (self.metadataList.item(row, 0)).text()\n logging.debug(\"Removing metadata \" + metaHeader + \" from \" + str(filePath))\n self.filesList.removeMeta(filePath, metaHeader, row)",
"def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)",
"def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)",
"def remove_custom_installation(self):\n\n logger.info(\"Removing old customization\")\n for candidate in os.listdir(self.rundir):\n if candidate not in (\"config\", \"delta\"):\n candidate = os.path.join(self.rundir, candidate)\n try:\n shutil.rmtree(candidate)\n except NotADirectoryError:\n os.remove(candidate)",
"def clear(self):\n for tag in self.meta.findall(CN('meta:user-defined')):\n self.meta.remove(tag)",
"def delete_metadata(self, keys):\n return self.manager.delete_metadata(self, keys)",
"def handleCleanMetadataRecon(self):\n logging.debug(\"Removing compromising personal info and remaking the file...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n fileType = self.filesList.getFileObj(filePath).type\n self.printPdfPersonalData(filePath, \n fileType,\n AddedFile.changeBase(filePath, self.outputPath))\n self.tabArea.setCurrentIndex(1)\n self.changeCursor()\n self.filesList.getFileObj(filePath).reconMetaCleaned = True",
"def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)",
"def clearMetaFiles(self, meta_id,fpath):\n\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('clear_meta_analysis_files', [meta_id,fpath])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception(str(e))",
"def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)",
"def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)",
"def _remove_metadata(library_name: str, version_string: str) -> None:\n possible_normalized_directory_names = (\n _get_possible_normalized_metadata_directory_names(\n library_name, version_string))\n normalized_directory_names = [\n normalize_directory_name(name)\n for name in os.listdir(common.THIRD_PARTY_PYTHON_LIBS_DIR)\n if os.path.isdir(\n os.path.join(common.THIRD_PARTY_PYTHON_LIBS_DIR, name))\n ]\n for normalized_directory_name in normalized_directory_names:\n # Python metadata directory names contain a python library name that\n # does not have uniform case. However, python libraries are equivalent\n # regardless of their case. Therefore, in order to check if a python\n # library's metadata exists in a directory, we need to normalize the\n # directory name. Otherwise, we would need to check every permutation of\n # the casing for metadata directories generated with the naming\n # convention: <library_name>-<library-version>.\n if normalized_directory_name in possible_normalized_directory_names:\n path_to_delete = os.path.join(\n common.THIRD_PARTY_PYTHON_LIBS_DIR, normalized_directory_name)\n shutil.rmtree(path_to_delete)"
] |
[
"0.6399154",
"0.63534534",
"0.62386084",
"0.62131715",
"0.6198965",
"0.6033478",
"0.5950016",
"0.58653796",
"0.5831039",
"0.5812334",
"0.578384",
"0.57300204",
"0.5706388",
"0.565144",
"0.56208694",
"0.5618025",
"0.5594267",
"0.5579753",
"0.5573522",
"0.55539423",
"0.55429447",
"0.5506153",
"0.55005854",
"0.5495498",
"0.5490026",
"0.54886144",
"0.54556847",
"0.5455176",
"0.5455038",
"0.5429515"
] |
0.8563163
|
0
|
Raise an exception if the current metadata for the specified role has expired.
|
def _ensure_not_expired(self, metadata_role):
# Construct the full metadata filename and the location of its
# current path. The current path of 'metadata_role' is needed
# to log the exact filename of the expired metadata.
metadata_filename = metadata_role + '.txt'
rolepath = os.path.join(self.metadata_directory['current'],
metadata_filename)
rolepath = os.path.abspath(rolepath)
# Extract the expiration time.
expires = self.metadata['current'][metadata_role]['expires']
# If the current time has surpassed the expiration date, raise
# an exception. 'expires' is in YYYY-MM-DD HH:MM:SS format, so
# convert it to seconds since the epoch, which is the time format
# returned by time.time() (i.e., current time), before comparing.
current_time = time.time()
expiry_time = tuf.formats.parse_time(expires)
if expiry_time < current_time:
logger.error('Metadata '+repr(rolepath)+' expired on '+repr(expires)+'.')
raise tuf.ExpiredMetadataError(expires)
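
A standalone sketch of the same expiry check, assuming the 'expires' string is UTC in 'YYYY-MM-DD HH:MM:SS' form as the comment states. Here calendar.timegm(time.strptime(...)) stands in for tuf.formats.parse_time, and ExpiredMetadataError is a local stand-in for tuf.ExpiredMetadataError.

import calendar
import time

class ExpiredMetadataError(Exception):
    pass

def ensure_not_expired(expires):
    # Convert the expiration string to seconds since the epoch and compare
    # it against the current time; raise if the deadline has passed.
    expiry_time = calendar.timegm(time.strptime(expires, '%Y-%m-%d %H:%M:%S'))
    if expiry_time < time.time():
        raise ExpiredMetadataError(expires)

ensure_not_expired('2038-01-01 00:00:00')      # still valid
try:
    ensure_not_expired('2001-01-01 00:00:00')  # long expired
except ExpiredMetadataError as e:
    print('expired:', e)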
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def expired(self):\n raise InvalidSessionException('Need to be implemented')",
"def has_expired(self) -> bool:\n raise NotImplementedError() # pragma: nocover",
"def is_access_expired(self) -> bool:\n entitlement_contract = self.cfg.entitlements.get(self.name, {})\n # TODO(No expiry per resource in MVP yet)\n expire_str = entitlement_contract.get('expires')\n if not expire_str:\n return False\n expiry = datetime.strptime(expire_str, '%Y-%m-%dT%H:%M:%S.%fZ')\n if expiry >= datetime.utcnow():\n return False\n return True",
"def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())",
"def _has_expired(self):\r\n expired = False\r\n if hasattr(self, 'Expiration'):\r\n now = datetime.datetime.utcnow()\r\n expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')\r\n expired = (now >= expiration)\r\n else:\r\n raise ValueError(\"ERROR: Request for expired property, but no Expiration in HIT!\")\r\n return expired",
"def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts",
"def test_trust_expire_invalid(self):\n # with an expiry specified\n expires_str = 'bad.123Z'\n self.assertRaises(lib_exc.BadRequest,\n self.create_trust,\n expires=expires_str)",
"def _has_expired(self):\n try:\n expires = datetime.fromtimestamp(\n os.stat(self.lockfile).st_mtime\n )\n except OSError as e:\n if e in self.NOT_EXIST_ERRORS:\n return False\n raise\n return datetime.now() > expires",
"def is_expired(self):\n delta = datetime.datetime.now() - self.created_at\n\n return delta.total_seconds() > 15*60",
"def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)",
"def test_invalid(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n with pytest.raises(AssertionError):\n key.audit(5, 1, 1, 1)",
"def test_raises_token_expired_when_applicable(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/no_valid_auth_token.yaml'):\n with self.assertRaises(exceptions.TokenAndRefreshExpiredError):\n badgr.get_from_server(self._sample_url)",
"def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)",
"def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])\n assert (exp - lm).seconds == 3600",
"def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)",
"def expired(self):\n return int(time.time()) > self.expires_at",
"def test_get_versions_cached_expired_connectivity_error(self):\n versions = {\"foo-1.0.tar.gz\": \"../../packages/foo-1.0.tar.gz\"}\n self.index._save_index(\"foo\", versions)\n with patch.object(self.index, \"_is_expired\", lambda ttl: True):\n with patch(\"cheddar.index.remote.get\") as mocked:\n mocked.return_value = MagicMock()\n mocked.return_value.status_code = codes.gateway_timeout\n result = self.index.get_versions(\"foo\")\n eq_(result, versions)\n eq_(mocked.call_count, 1)",
"def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]",
"def test_old_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'",
"def clear_expired(self):\n raise NotImplementedError",
"def is_expired(self):\n return self.expiration_date <= self._now()",
"def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False",
"def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False",
"def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()",
"def checked_expired(self, order):\n exp_time = order.get_expiration_time()\n curr_time = self.table.current_time\n # self.debug(\"Check %i expiration: exp(%f) vs. curr(%f)\" % (order.m_orderId, exp_time, curr_time))\n if curr_time >= exp_time:\n self.debug(\"Order %i has expired\" % order.m_orderId)\n order.expired()\n return order",
"def isExpired(self):\n return True/False",
"def is_expired(self):\n return utcnow() >= self.expires",
"def flush_expired_tokens(self):\n raise exception.NotImplemented() # pragma: no cover",
"def is_expired(self) -> bool:\n return now() > self.expires",
"def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()"
] |
[
"0.66576964",
"0.6158783",
"0.5893915",
"0.58275145",
"0.5823112",
"0.5794615",
"0.5753321",
"0.57031006",
"0.56852293",
"0.5669985",
"0.5650522",
"0.5640487",
"0.5634891",
"0.56336796",
"0.56275904",
"0.5607949",
"0.5605594",
"0.5583486",
"0.55787855",
"0.5542038",
"0.5537857",
"0.5536443",
"0.5519218",
"0.5503734",
"0.55019754",
"0.54997754",
"0.54859805",
"0.5482576",
"0.5475089",
"0.5473544"
] |
0.7384562
|
0
|
Get a list of the target information for all the trusted targets on the repository. This list also includes all the targets of delegated roles. The list conforms to 'tuf.formats.TARGETFILES_SCHEMA'
|
def all_targets(self):
# Load the most up-to-date targets of the 'targets' role and all
# delegated roles.
self._refresh_targets_metadata(include_delegations=True)
all_targets = []
# Fetch the targets for the 'targets' role.
all_targets = self._targets_of_role('targets', skip_refresh=True)
# Fetch the targets for the delegated roles.
for delegated_role in tuf.roledb.get_delegated_rolenames('targets'):
all_targets = self._targets_of_role(delegated_role, all_targets,
skip_refresh=True)
return all_targets
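
A hedged usage sketch of what a caller receives: each entry is a dict with 'filepath' and 'fileinfo' keys, as built by _targets_of_role() further down. The sample entries below are invented for illustration only.

all_targets = [
    {'filepath': 'django/django-1.7.1.tgz',
     'fileinfo': {'length': 7871193, 'hashes': {'sha256': 'aaaa...'}}},
    {'filepath': 'README.txt',
     'fileinfo': {'length': 1024, 'hashes': {'sha256': 'bbbb...'}}},
]
# Iterate over the TARGETFILES-style list and report each path and size.
for target in all_targets:
    print(target['filepath'], target['fileinfo']['length'])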
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def Targets(self):\n return self._targets",
"def all_targets(self):\n return self._combined_all_versioned_targets.targets",
"def ProduceTargets(self):\n\n if self.completion_wanted:\n return self._FindTarget()\n else:\n return []",
"def targets(self):\n\n return [get_target_by_id(i) for i in self._target_ids]",
"def targets(self):\n\n\t\tstatus, targets = self.execute(self.mission, 'target_list', self.kingdom)\n\n\t\t# Nothing specified : default is everyone but me.\n\t\tif targets == self:\n\t\t\ttargets = Kingdom.objects.exclude(id=self.kingdom_id)\n\t\t\n\t\t# Pre-fetch user, for direct access to kingdom name.\n\t\tif isinstance(targets, QuerySet):\n\t\t\ttargets = targets.select_related('user')\n\n\t\treturn targets",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def target_ids(self):\n\n return self._target_ids",
"def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets",
"def targets_infos(self) -> Dict[str, MetaFile]:\n raise NotImplementedError",
"def get_targets() -> Generator[dict, dict, list[TargetInfo]]:\n response = yield {\"method\": \"Target.getTargets\", \"params\": {}}\n return [TargetInfo.from_json(t) for t in response[\"targetInfos\"]]",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PlanTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def GetTargets(self):\n return []",
"def get_targets(self):\n\t\treturn self.prDoc['inputs']['data'][0]['targets']",
"def list_targets(self):\n tenant_id = self.request.user.tenant_id\n ports = port_list(self.request, tenant_id=tenant_id)\n servers, has_more = nova.server_list(self.request, detailed=False)\n server_dict = collections.OrderedDict(\n [(s.id, s.name) for s in servers])\n reachable_subnets = self._get_reachable_subnets(ports)\n\n targets = []\n for p in ports:\n # Remove network ports from Floating IP targets\n if p.device_owner.startswith('network:'):\n continue\n server_name = server_dict.get(p.device_id)\n\n for ip in p.fixed_ips:\n if ip['subnet_id'] not in reachable_subnets:\n continue\n # Floating IPs can only target IPv4 addresses.\n if netaddr.IPAddress(ip['ip_address']).version != 4:\n continue\n targets.append(FloatingIpTarget(p, ip['ip_address'],\n server_name))\n return targets",
"def get_target_list(data_dir):\n target_list = os.listdir(data_dir)\n\n return target_list",
"def get_targets():\n # Use a list comp because querying MODM with Guid.find(Q('referent', 'eq', None))\n # only catches the first case.\n return [each for each in Guid.find() if each.referent is None]",
"def targets(self):\n if self._targets is None:\n if self.verbose:\n print('Building targets from coco dataset')\n cacher = self._cacher('targets')\n _targets = cacher.tryload(on_error='clear')\n if _targets is None:\n _targets = tabular_coco_targets(self.dset)\n cacher.save(_targets)\n self._targets = _targets\n return self._targets",
"def target(self) -> list[str]:\n if self._target is None:\n print(self.__class__.target.__doc__)\n raise SilSubProblemError(\n \"The *target* property has not been set (see above).\"\n )\n return self._target",
"def output_targets(self) -> Set[str]:\n return {\n out.target\n for out in\n self.outputs\n }",
"def list(self):\n\n r = self.get_resource()\n\n return list(self.set_target_file(u.target_file) for u in r.list())",
"def get_all_targets(self):\n return dict(self._targets)",
"def get_all_targets():\n\n json_data = gtop.get_json_from_gtop(\"targets\")\n return [Target(t) for t in json_data]",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContactTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def targets(self, rolename: str = Targets.type) -> Targets:\n targets = self.open(rolename).signed\n if not isinstance(targets, Targets):\n raise RuntimeError(\"Unexpected targets type\")\n return targets",
"def AllTargets(target_list, target_dicts, build_file):\n bftargets = BuildFileTargets(target_list, build_file)\n deptargets = DeepDependencyTargets(target_dicts, bftargets)\n return bftargets + deptargets",
"def get_targets(self):\n\t\n\t\tself.target = []\n\t\ttarget_ins = self.settings['target']\n\t\tfor key in target_ins.keys():\n\t\t\tif key == 'raw':\n\t\t\t\tself.target.append(target_ins[key])\n\t\t\telif key == 'textfile':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,_].+\\s*:\\s*[A-Z].+$',t):\n\t\t\t\t\t\tself.target.append(tuple([i.strip() for i in t.split(':')]))\n\t\t\telif key == 'textfile_rna':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,0-9,_].+\\s*:\\s*[A-Z,a-z].+$',t):\n\t\t\t\t\t\tself.target.append(list([i.strip() for i in t.split(':')]))\n\t\t\t\t\t\trnaseq = self.target[-1][1]\n\t\t\t\t\t\t#---extra substitutions for later\n\t\t\t\t\t\tif 'regex_subs' in self.settings.keys():\n\t\t\t\t\t\t\tfor regex in self.settings['regex_subs']:\n\t\t\t\t\t\t\t\trnaseq = re.sub(regex[0],regex[1],rnaseq)\n\t\t\t\t\t\trnaseq = rnaseq.upper()\n\t\t\t\t\t\trnaseq = re.sub('T','U',rnaseq)\n\t\t\t\t\t\taminoseq = ''.join([dna_mapping[i] for i in [rnaseq[i:i+3] \n\t\t\t\t\t\t\tfor i in range(0,len(rnaseq),3)]])\n\t\t\t\t\t\tself.target[-1][1] = re.sub('T','U',aminoseq)\n\t\t\t\t\t\tself.target[-1] = tuple(self.target[-1])\n\t\t\telse: raise Exception('except: unclear target type')",
"def _get_installed_targets(target_types):\r\n lines = [TargetsHelp.INSTALLED_TARGETS_HEADER]\r\n for target_type in sorted(target_types.keys()):\r\n if target_types[target_type].__doc__ is None:\r\n desc = 'Description unavailable.'\r\n else:\r\n desc = target_types[target_type].__doc__.split('\\n')[0]\r\n lines.append(' %s: %s' % (\r\n TargetsHelp.TARGET_TO_ALIAS[target_type].rjust(TargetsHelp.MAX_ALIAS_LEN), desc))\r\n return lines",
"def all_versioned_targets(self):\n return self._all_versioned_targets",
"def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors",
"def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors"
] |
[
"0.6948129",
"0.67123234",
"0.66501284",
"0.66192716",
"0.6465113",
"0.64457554",
"0.6334513",
"0.6326923",
"0.6301015",
"0.6260099",
"0.6252691",
"0.6228816",
"0.62269753",
"0.62018263",
"0.6183172",
"0.6166132",
"0.61596525",
"0.61502266",
"0.6108508",
"0.61027354",
"0.6072686",
"0.6069221",
"0.60560834",
"0.6002256",
"0.60017484",
"0.5992835",
"0.5985979",
"0.5963676",
"0.5960609",
"0.5960609"
] |
0.6962754
|
0
|
Refresh the targets metadata of 'rolename'. If 'include_delegations' is True, include all the delegations that follow 'rolename'. The metadata for the 'targets' role is updated in refresh() by the _update_metadata_if_changed('targets') call, not here. Delegated roles are not loaded when the repository is first initialized. They are loaded from disk, updated if they have changed, and stored to the 'self.metadata' store by this function. This function is called by the target methods, like all_targets() and targets_of_role().
|
def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):
roles_to_update = []
# See if this role provides metadata and, if we're including
# delegations, look for metadata from delegated roles.
role_prefix = rolename + '/'
for metadata_path in self.metadata['current']['release']['meta'].keys():
if metadata_path == rolename + '.txt':
roles_to_update.append(metadata_path[:-len('.txt')])
elif include_delegations and metadata_path.startswith(role_prefix):
# Add delegated roles. Skip roles names containing compression
# extensions.
if metadata_path.endswith('.txt'):
roles_to_update.append(metadata_path[:-len('.txt')])
# Remove the 'targets' role because it gets updated when the targets.txt
# file is updated in _update_metadata_if_changed('targets').
if rolename == 'targets':
try:
roles_to_update.remove('targets')
except ValueError:
message = 'The Release metadata file is missing the targets.txt entry.'
raise tuf.RepositoryError(message)
# If there is nothing to refresh, we are done.
if not roles_to_update:
return
# Sort the roles so that parent roles always come first.
roles_to_update.sort()
logger.debug('Roles to update: '+repr(roles_to_update)+'.')
# Iterate through 'roles_to_update', load its metadata
# file, and update it if it has changed.
for rolename in roles_to_update:
self._load_metadata_from_file('previous', rolename)
self._load_metadata_from_file('current', rolename)
self._update_metadata_if_changed(rolename)
# Remove the role if it has expired.
try:
self._ensure_not_expired(rolename)
except tuf.ExpiredMetadataError:
tuf.roledb.remove_role(rolename)
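
The role-selection step at the top of this function can be isolated as a small, runnable sketch: given the filenames listed in the release metadata's 'meta' dict, keep the role itself and (optionally) its delegations, skipping compressed variants and stripping the '.txt' suffix. roles_to_refresh and the sample filenames are illustrative only; the later removal of 'targets' itself (handled via _update_metadata_if_changed) is left out.

def roles_to_refresh(meta_filenames, rolename='targets', include_delegations=False):
    roles = []
    role_prefix = rolename + '/'
    for metadata_path in meta_filenames:
        if metadata_path == rolename + '.txt':
            roles.append(metadata_path[:-len('.txt')])
        elif include_delegations and metadata_path.startswith(role_prefix):
            # Skip names carrying compression extensions such as '.txt.gz'.
            if metadata_path.endswith('.txt'):
                roles.append(metadata_path[:-len('.txt')])
    return sorted(roles)

meta = ['root.txt', 'targets.txt', 'targets/unclaimed.txt', 'targets/unclaimed.txt.gz']
print(roles_to_refresh(meta, include_delegations=True))
# -> ['targets', 'targets/unclaimed']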
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def targets_of_role(self, rolename='targets'):\n \n # Does 'rolename' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.RELPATH_SCHEMA.check_match(rolename)\n\n self._refresh_targets_metadata(rolename)\n \n return self._targets_of_role(rolename, skip_refresh=True)",
"def refresh(self):\n\n # The timestamp role does not have signed metadata about it; otherwise we\n # would need an infinite regress of metadata. Therefore, we use some\n # default, sane metadata about it.\n DEFAULT_TIMESTAMP_FILEINFO = {\n 'hashes':None,\n 'length': tuf.conf.DEFAULT_TIMESTAMP_REQUIRED_LENGTH\n }\n\n # Update the top-level metadata. The _update_metadata_if_changed() and\n # _update_metadata() calls below do NOT perform an update if there\n # is insufficient trusted signatures for the specified metadata.\n # Raise 'tuf.NoWorkingMirrorError' if an update fails.\n\n # Use default but sane information for timestamp metadata, and do not\n # require strict checks on its required length.\n self._update_metadata('timestamp', DEFAULT_TIMESTAMP_FILEINFO)\n\n self._update_metadata_if_changed('release', referenced_metadata='timestamp')\n\n self._update_metadata_if_changed('root')\n\n self._update_metadata_if_changed('targets')\n\n # Updated the top-level metadata (which all had valid signatures), however,\n # have they expired? Raise 'tuf.ExpiredMetadataError' if any of the metadata\n # has expired.\n for metadata_role in ['timestamp', 'root', 'release', 'targets']:\n self._ensure_not_expired(metadata_role)",
"def _import_delegations(self, parent_role):\n \n current_parent_metadata = self.metadata['current'][parent_role]\n \n if 'delegations' not in current_parent_metadata:\n return\n\n # This could be quite slow with a huge number of delegations.\n keys_info = current_parent_metadata['delegations'].get('keys', {})\n roles_info = current_parent_metadata['delegations'].get('roles', [])\n\n logger.debug('Adding roles delegated from '+repr(parent_role)+'.')\n \n # Iterate through the keys of the delegated roles of 'parent_role'\n # and load them.\n for keyid, keyinfo in keys_info.items():\n if keyinfo['keytype'] in ['rsa', 'ed25519']:\n key = tuf.keys.format_metadata_to_key(keyinfo)\n \n # We specify the keyid to ensure that it's the correct keyid\n # for the key.\n try:\n tuf.keydb.add_key(key, keyid)\n except tuf.KeyAlreadyExistsError:\n pass\n except (tuf.FormatError, tuf.Error), e:\n logger.exception('Failed to add keyid: '+repr(keyid)+'.')\n logger.error('Aborting role delegation for parent role '+parent_role+'.')\n raise\n else:\n logger.warn('Invalid key type for '+repr(keyid)+'.')\n continue\n\n # Add the roles to the role database.\n for roleinfo in roles_info:\n try:\n # NOTE: tuf.roledb.add_role will take care\n # of the case where rolename is None.\n rolename = roleinfo.get('name')\n logger.debug('Adding delegated role: '+str(rolename)+'.')\n tuf.roledb.add_role(rolename, roleinfo)\n except tuf.RoleAlreadyExistsError, e:\n logger.warn('Role already exists: '+rolename)\n except:\n logger.exception('Failed to add delegated role: '+rolename+'.')\n raise",
"def all_targets(self):\n \n # Load the most up-to-date targets of the 'targets' role and all\n # delegated roles.\n self._refresh_targets_metadata(include_delegations=True)\n \n all_targets = []\n # Fetch the targets for the 'targets' role.\n all_targets = self._targets_of_role('targets', skip_refresh=True)\n\n # Fetch the targets for the delegated roles.\n for delegated_role in tuf.roledb.get_delegated_rolenames('targets'):\n all_targets = self._targets_of_role(delegated_role, all_targets,\n skip_refresh=True)\n \n return all_targets",
"def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. \n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)",
"def _rebuild_key_and_role_db(self):\n \n # Clobbering this means all delegated metadata files are rendered outdated\n # and will need to be reloaded. However, reloading the delegated metadata\n # files is avoided here because fetching target information with methods\n # like all_targets() and target() always cause a refresh of these files.\n # The metadata files for delegated roles are also not loaded when the\n # repository is first instantiated. Due to this setup, reloading delegated\n # roles is not required here.\n tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])\n tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])",
"def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()",
"def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')",
"def refresh(self):\n self.proxies = self._init_proxies(self.proxy_providers)",
"def update_targets_with_http_info(self, target, **kwargs):\n\n all_params = ['target', 'ids', 'names']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_targets\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'target' is set\n if ('target' not in params) or (params['target'] is None):\n raise ValueError(\"Missing the required parameter `target` when calling `update_targets`\")\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'ids' in params:\n query_params.append(('ids', params['ids']))\n collection_formats['ids'] = 'csv'\n if 'names' in params:\n query_params.append(('names', params['names']))\n collection_formats['names'] = 'csv'\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'target' in params:\n body_params = params['target']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['AuthTokenHeader']\n\n return self.api_client.call_api('/1.9/targets', 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TargetResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def edit_targets(\n self, rolename: str = Targets.type\n ) -> Generator[Targets, None, None]:\n with self.edit(rolename) as targets:\n if not isinstance(targets, Targets):\n raise RuntimeError(f\"Unexpected targets ({rolename}) type\")\n yield targets",
"def update_target(self):\n pass",
"def delegations(self) -> Optional[Sequence['outputs.DelegationResponse']]:\n return pulumi.get(self, \"delegations\")",
"def RefreshLearnedInformation(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('refreshLearnedInformation', payload=payload, response_object=None)",
"def targets(self, rolename: str = Targets.type) -> Targets:\n targets = self.open(rolename).signed\n if not isinstance(targets, Targets):\n raise RuntimeError(\"Unexpected targets type\")\n return targets",
"def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model._features_extractor._modules.items():\n for layer in module:\n if isinstance(layer, LeakyReLU):\n layer.register_backward_hook(relu_backward_hook_function)\n layer.register_forward_hook(relu_forward_hook_function)",
"def refresh_info(self):\n if not self.is_setup_connected() and not self.target_namespace:\n return\n self.get_association_info()\n self.create_techanim_connections()",
"def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)",
"def update_all_metadata(self, metadata):\n return self.manager.update_all_metadata(self, metadata)",
"def addTarget(self, data):\n #tgt: length x n_feature, n_feture is 5, AMR_CAT, AMR_LE, AMR_AUX, AMR_SENSE, AMR_CAN_COPY\n # after make all node aligned to a word or NULL word, length is equal to the length of tokes.\n if \"amr_id\" in data:\n self.tgt.append(torch.LongTensor(data[\"amr_id\"])) # lemma,cat, lemma_sense,ner,is_high\n # align_index, simple append all the aligned index\n # align_index = [[[i1,ij],[i2,ik] ]]\n self.align_index.append(data[\"amr_index\"]) # this index is also recategorized id\n\n amrl = len(data[\"amr_id\"])\n for i in data[\"amr_rel_index\"]:\n assert i <amrl,data\n #rel\n self.rel_seq.append(torch.LongTensor(data[\"amr_rel_id\"])) # lemma,cat, lemma_sense, the order is in gold amr node order\n self.rel_index.append(torch.LongTensor(data[\"amr_rel_index\"])) # index of head node from recategorized node order\n # here use rel dict to exchange the roleStr into id., mats is a matrix [real_gold_amr_len x real_gold_amr_len]\n mats = role_mat_to_sparse(data[\"amr_roles_mat\"], self.rel_dict)\n\n self.rel_mat.append(mats) #role, index\n self.root.append(data[\"amr_root\"]) #role, index for gold amr nodes\n\n #source means raw contents before becoming a tensor\n self.tgt_source.append([data[\"amr_rel_seq\"],data[\"amr_rel_triples\"],data[\"amr_convertedl_seq\"],data[\"amr_seq\"]])\n elif \"psd_id\" in data:\n self.tgt.append(torch.LongTensor(data[\"psd_id\"])) # lemma,cat, lemma_sense,ner,is_high\n # align_index, simple append all the aligned index\n # align_index = [[[i1,ij],[i2,ik] ]]\n self.align_index.append(data[\"psd_index\"])\n\n amrl = len(data[\"psd_id\"])\n for i in data[\"psd_rel_index\"]:\n assert i <amrl,data\n #rel\n self.rel_seq.append(torch.LongTensor(data[\"psd_rel_id\"])) # lemma,cat, lemma_sense\n self.rel_index.append(torch.LongTensor(data[\"psd_rel_index\"]))\n # here use rel dict to exchange the roleStr into id.\n mats = role_mat_to_sparse(data[\"psd_roles_mat\"], self.rel_dict)\n self.rel_mat.append(mats) #role, index\n self.root.append(data[\"psd_root\"]) #role, index\n\n #source means raw contents before becoming a tensor\n self.tgt_source.append([data[\"psd_rel_seq\"],data[\"psd_rel_triples\"],data[\"psd_convertedl_seq\"],data[\"psd_seq\"]])\n elif \"dm_id\" in data:\n self.tgt.append(torch.LongTensor(data[\"dm_id\"])) # lemma,cat, lemma_sense,ner,is_high\n # align_index, simple append all the aligned index\n # align_index = [[[i1,ij],[i2,ik] ]]\n self.align_index.append(data[\"dm_index\"])\n\n amrl = len(data[\"dm_id\"])\n for i in data[\"dm_rel_index\"]:\n assert i <amrl,data\n #rel\n self.rel_seq.append(torch.LongTensor(data[\"dm_rel_id\"])) # lemma,cat, lemma_sense\n self.rel_index.append(torch.LongTensor(data[\"dm_rel_index\"]))\n # here use rel dict to exchange the roleStr into id.\n mats = role_mat_to_sparse(data[\"dm_roles_mat\"], self.rel_dict)\n self.rel_mat.append(mats) #role, index\n self.root.append(data[\"dm_root\"]) #role, index\n\n #source means raw contents before becoming a tensor\n self.tgt_source.append([data[\"dm_rel_seq\"],data[\"dm_rel_triples\"],data[\"dm_convertedl_seq\"],data[\"dm_seq\"]])",
"def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)",
"def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()",
"def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.features._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)",
"def _preorder_depth_first_walk(self, target_filepath):\n\n target = None\n current_metadata = self.metadata['current']\n role_names = ['targets']\n\n # Ensure the client has the most up-to-date version of 'targets.txt'.\n # Raise 'tuf.NoWorkingMirrorError' if the changed metadata cannot be successfully\n # downloaded and 'tuf.RepositoryError' if the referenced metadata is\n # missing. Target methods such as this one are called after the top-level\n # metadata have been refreshed (i.e., updater.refresh()).\n self._update_metadata_if_changed('targets')\n\n # Preorder depth-first traversal of the tree of target delegations.\n while len(role_names) > 0 and target is None:\n\n # Pop the role name from the top of the stack.\n role_name = role_names.pop(-1)\n\n # The metadata for 'role_name' must be downloaded/updated before\n # its targets, delegations, and child roles can be inspected.\n # self.metadata['current'][role_name] is currently missing.\n # _refresh_targets_metadata() does not refresh 'targets.txt', it\n # expects _update_metadata_if_changed() to have already refreshed it,\n # which this function has checked above.\n self._refresh_targets_metadata(role_name, include_delegations=False)\n\n role_metadata = current_metadata[role_name]\n targets = role_metadata['targets']\n delegations = role_metadata.get('delegations', {})\n child_roles = delegations.get('roles', [])\n target = self._get_target_from_targets_role(role_name, targets,\n target_filepath)\n\n if target is None:\n\n # Push children in reverse order of appearance onto the stack.\n # NOTE: This may be a slow operation if there are many delegated roles.\n for child_role in reversed(child_roles):\n child_role_name = self._visit_child_role(child_role, target_filepath)\n if child_role_name is None:\n logger.debug('Skipping child role '+repr(child_role_name))\n else:\n logger.debug('Adding child role '+repr(child_role_name))\n role_names.append(child_role_name)\n\n else:\n logger.debug('Found target in current role '+repr(role_name))\n\n return target",
"def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. 
Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)",
"def addTargets(self, **kwargs):\n self.targets.update(kwargs)\n for key, val in kwargs.items():\n self.namespace[key] = val",
"def rollout(self):\n self.agent.reset()\n for _ in range(self._n_paths_per_trial):\n self.start_rollout()\n while not self.step_rollout():\n pass\n self._agent_infos['batch_idx'] = np.full(len(self._rewards),\n self._worker_number)\n return self.collect_rollout()",
"def update_relus(self):\n\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs",
"def update_all_targets(self):\n soft_update(self.target_critic, self.critic, self.tau)\n soft_update(self.target_policy, self.policy, self.tau)"
] |
[
"0.6127298",
"0.55531466",
"0.5259424",
"0.50196505",
"0.4976388",
"0.49235946",
"0.48447338",
"0.47294107",
"0.45664093",
"0.4490421",
"0.43236414",
"0.43059507",
"0.41776976",
"0.4150512",
"0.41420183",
"0.40831453",
"0.40668333",
"0.40531123",
"0.40435153",
"0.40424895",
"0.4041468",
"0.40302265",
"0.40235174",
"0.40205225",
"0.3994889",
"0.39793685",
"0.39713132",
"0.39691085",
"0.3965934",
"0.39632893"
] |
0.8320714
|
0
|
Return the target information for all the targets of 'rolename'. The returned information is a list of target dictionaries, each pairing a 'filepath' with its 'fileinfo'.
|
def _targets_of_role(self, rolename, targets=None, skip_refresh=False):
if targets is None:
targets = []
logger.debug('Getting targets of role: '+repr(rolename)+'.')
if not tuf.roledb.role_exists(rolename):
raise tuf.UnknownRoleError(rolename)
# We do not need to worry about the target paths being trusted because
# this is enforced before any new metadata is accepted.
if not skip_refresh:
self._refresh_targets_metadata(rolename)
# Do we have metadata for 'rolename'?
if rolename not in self.metadata['current']:
message = 'No metadata for '+rolename+'. Unable to determine targets.'
logger.debug(message)
return targets
# Get the targets specified by the role itself.
for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():
new_target = {}
new_target['filepath'] = filepath
new_target['fileinfo'] = fileinfo
targets.append(new_target)
return targets
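
For illustration, a minimal standalone sketch of the flattening performed above, using hypothetical role metadata (the paths, lengths, and hashes are placeholders, not values from a real repository):

role_metadata = {
  'targets': {
    'packages/app-1.0.tgz': {'length': 1024, 'hashes': {'sha256': 'abc...'}},
    'docs/readme.txt': {'length': 12, 'hashes': {'sha256': 'def...'}}
  }
}

targets = []
for filepath, fileinfo in role_metadata['targets'].items():
  # Each entry becomes a small dict pairing the path with its file information,
  # which is the shape returned by _targets_of_role().
  targets.append({'filepath': filepath, 'fileinfo': fileinfo})

print(targets)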
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def targets_of_role(self, rolename='targets'):\n \n # Does 'rolename' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.RELPATH_SCHEMA.check_match(rolename)\n\n self._refresh_targets_metadata(rolename)\n \n return self._targets_of_role(rolename, skip_refresh=True)",
"def targets(self, rolename: str = Targets.type) -> Targets:\n targets = self.open(rolename).signed\n if not isinstance(targets, Targets):\n raise RuntimeError(\"Unexpected targets type\")\n return targets",
"def Targets(self):\n return self._targets",
"def all_targets(self):\n \n # Load the most up-to-date targets of the 'targets' role and all\n # delegated roles.\n self._refresh_targets_metadata(include_delegations=True)\n \n all_targets = []\n # Fetch the targets for the 'targets' role.\n all_targets = self._targets_of_role('targets', skip_refresh=True)\n\n # Fetch the targets for the delegated roles.\n for delegated_role in tuf.roledb.get_delegated_rolenames('targets'):\n all_targets = self._targets_of_role(delegated_role, all_targets,\n skip_refresh=True)\n \n return all_targets",
"def all_targets(self):\n return self._combined_all_versioned_targets.targets",
"def get_targets(self):\n\t\treturn self.prDoc['inputs']['data'][0]['targets']",
"def GetTargets(self):\n return []",
"def targets(self):\n\n\t\tstatus, targets = self.execute(self.mission, 'target_list', self.kingdom)\n\n\t\t# Nothing specified : default is everyone but me.\n\t\tif targets == self:\n\t\t\ttargets = Kingdom.objects.exclude(id=self.kingdom_id)\n\t\t\n\t\t# Pre-fetch user, for direct access to kingdom name.\n\t\tif isinstance(targets, QuerySet):\n\t\t\ttargets = targets.select_related('user')\n\n\t\treturn targets",
"def get_all_targets():\n\n json_data = gtop.get_json_from_gtop(\"targets\")\n return [Target(t) for t in json_data]",
"def get_targets() -> Generator[dict, dict, list[TargetInfo]]:\n response = yield {\"method\": \"Target.getTargets\", \"params\": {}}\n return [TargetInfo.from_json(t) for t in response[\"targetInfos\"]]",
"def GetRunTargets(self):\n return list(self._run_target_index.keys())",
"def get_all_targets(self):\n return dict(self._targets)",
"def targets_infos(self) -> Dict[str, MetaFile]:\n raise NotImplementedError",
"def get_targets(self):\n\t\n\t\tself.target = []\n\t\ttarget_ins = self.settings['target']\n\t\tfor key in target_ins.keys():\n\t\t\tif key == 'raw':\n\t\t\t\tself.target.append(target_ins[key])\n\t\t\telif key == 'textfile':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,_].+\\s*:\\s*[A-Z].+$',t):\n\t\t\t\t\t\tself.target.append(tuple([i.strip() for i in t.split(':')]))\n\t\t\telif key == 'textfile_rna':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,0-9,_].+\\s*:\\s*[A-Z,a-z].+$',t):\n\t\t\t\t\t\tself.target.append(list([i.strip() for i in t.split(':')]))\n\t\t\t\t\t\trnaseq = self.target[-1][1]\n\t\t\t\t\t\t#---extra substitutions for later\n\t\t\t\t\t\tif 'regex_subs' in self.settings.keys():\n\t\t\t\t\t\t\tfor regex in self.settings['regex_subs']:\n\t\t\t\t\t\t\t\trnaseq = re.sub(regex[0],regex[1],rnaseq)\n\t\t\t\t\t\trnaseq = rnaseq.upper()\n\t\t\t\t\t\trnaseq = re.sub('T','U',rnaseq)\n\t\t\t\t\t\taminoseq = ''.join([dna_mapping[i] for i in [rnaseq[i:i+3] \n\t\t\t\t\t\t\tfor i in range(0,len(rnaseq),3)]])\n\t\t\t\t\t\tself.target[-1][1] = re.sub('T','U',aminoseq)\n\t\t\t\t\t\tself.target[-1] = tuple(self.target[-1])\n\t\t\telse: raise Exception('except: unclear target type')",
"def target(self) -> list[str]:\n if self._target is None:\n print(self.__class__.target.__doc__)\n raise SilSubProblemError(\n \"The *target* property has not been set (see above).\"\n )\n return self._target",
"def edit_targets(\n self, rolename: str = Targets.type\n ) -> Generator[Targets, None, None]:\n with self.edit(rolename) as targets:\n if not isinstance(targets, Targets):\n raise RuntimeError(f\"Unexpected targets ({rolename}) type\")\n yield targets",
"def _setTargetNames(self):\n\n # full target names\n if (self.dataStream == 0):\n self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\\\n 'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \\\n 'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \\\n 'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \\\n 'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \\\n 'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \\\n 'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])\n\n # main target names\n elif (self.dataStream == 1):\n self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \\\n 'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])\n\n # basic target names\n elif (self.dataStream == 2):\n self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])\n\n return 0",
"def all_versioned_targets(self):\n return self._all_versioned_targets",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PlanTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def fetch_target(self, name):\n target_name = []\n \n for x in [\"FFJ0\", \"FFJ3\", \"FFJ4\",\n \"MFJ0\", \"MFJ3\", \"MFJ4\",\n \"RFJ0\", \"RFJ3\", \"RFJ4\",\n \"LFJ0\", \"LFJ3\", \"LFJ4\", \"LFJ5\",\n \"THJ1\", \"THJ2\", \"THJ3\", \"THJ4\", \"THJ5\",\n \"WRJ1\", \"WRJ2\" ]:\n target_name.append( joint(joint_name = x, \n joint_target = rospy.get_param('/targets/'+name+'/'+x)) )\n return target_name",
"def getTargets(targets) -> str:\n return\"\"\"## Target\n| |Category| |Task| Estimated Time | Actual Time |\n| - | -: | - | - | - | - |\n{previousTargets}\"\"\".format(previousTargets=targets)",
"def get_target(self):\n self.target = []\n for tags in self.cfg.target.split(\",\"):\n t = tags.split(\"=\")\n self.target.append({\"Key\": t[0], \"Values\": [t[1]]})\n\n return self.target",
"def ProduceTargets(self):\n\n if self.completion_wanted:\n return self._FindTarget()\n else:\n return []",
"def targets(self):\n\n return [get_target_by_id(i) for i in self._target_ids]",
"def read_targets(self):\n\n c = self.c\n\n new = []\n name = None\n\n for line in c.p.b.split('\\n'):\n\n if line.startswith('NAME: '):\n if name is not None:\n g.es(\"Error reading targets, two NAMEs without an UNL between them\")\n return\n name = line[6:].strip()\n continue\n\n if line.startswith('UNL: '):\n if name is None:\n g.es(\"Error reading targets, UNL without preceeding NAME\")\n return\n unl = line[5:].strip()\n new.append((name, unl))\n name = None\n continue\n\n # other lines are just ignored\n\n g.app.db['_quickmove']['global_targets'] = [\n {'name': name2, 'unl': unl2} for name2, unl2 in new\n ]\n # make sure g.app.db knows it's been changed\n g.app.db['_quickmove'] = g.app.db['_quickmove']\n\n g.es(\"%d targets read - you should delete this node now\" % len(new))",
"def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets",
"def get_target_ports(self):\n return self.targets",
"def targets(self) -> List[List[float]]:\n return [d.targets for d in self.data]",
"def output_targets(self) -> Set[str]:\n return {\n out.target\n for out in\n self.outputs\n }",
"def hittable_targets(self):\n return [self.current_level.getPlayer()]"
] |
[
"0.77389604",
"0.73334914",
"0.66263235",
"0.6590217",
"0.6384551",
"0.638077",
"0.6363241",
"0.63349277",
"0.6297083",
"0.6225352",
"0.6199212",
"0.60384667",
"0.60043293",
"0.5998279",
"0.59841985",
"0.5959912",
"0.59539056",
"0.5950182",
"0.59437233",
"0.5927598",
"0.59062576",
"0.58912057",
"0.58764577",
"0.58752584",
"0.5854467",
"0.5832669",
"0.57922816",
"0.5788712",
"0.5775398",
"0.57594275"
] |
0.7770341
|
0
|
Return a list of trusted targets directly specified by 'rolename'. The returned information is a list of target dictionaries, each pairing a 'filepath' with its 'fileinfo'.
|
def targets_of_role(self, rolename='targets'):
# Does 'rolename' have the correct format?
# Raise 'tuf.FormatError' if there is a mismatch.
tuf.formats.RELPATH_SCHEMA.check_match(rolename)
self._refresh_targets_metadata(rolename)
return self._targets_of_role(rolename, skip_refresh=True)
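
A hedged usage sketch of this public wrapper, assuming the legacy tuf.client.updater module layout and an already-configured client; the mirror URL, directory names, and updater name below are placeholders:

import tuf.client.updater

# Hypothetical mirror configuration; adjust to the actual repository layout.
repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
                                  'metadata_path': 'metadata',
                                  'targets_path': 'targets',
                                  'confined_target_dirs': ['']}}
updater = tuf.client.updater.Updater('updater', repository_mirrors)

# List the targets signed directly by the top-level 'targets' role.
for target in updater.targets_of_role('targets'):
  print(target['filepath'], target['fileinfo']['length'])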
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def targets(self, rolename: str = Targets.type) -> Targets:\n targets = self.open(rolename).signed\n if not isinstance(targets, Targets):\n raise RuntimeError(\"Unexpected targets type\")\n return targets",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def all_targets(self):\n \n # Load the most up-to-date targets of the 'targets' role and all\n # delegated roles.\n self._refresh_targets_metadata(include_delegations=True)\n \n all_targets = []\n # Fetch the targets for the 'targets' role.\n all_targets = self._targets_of_role('targets', skip_refresh=True)\n\n # Fetch the targets for the delegated roles.\n for delegated_role in tuf.roledb.get_delegated_rolenames('targets'):\n all_targets = self._targets_of_role(delegated_role, all_targets,\n skip_refresh=True)\n \n return all_targets",
"def targets(self):\n\n\t\tstatus, targets = self.execute(self.mission, 'target_list', self.kingdom)\n\n\t\t# Nothing specified : default is everyone but me.\n\t\tif targets == self:\n\t\t\ttargets = Kingdom.objects.exclude(id=self.kingdom_id)\n\t\t\n\t\t# Pre-fetch user, for direct access to kingdom name.\n\t\tif isinstance(targets, QuerySet):\n\t\t\ttargets = targets.select_related('user')\n\n\t\treturn targets",
"def Targets(self):\n return self._targets",
"def list_trusts(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n results = self.engine.query(self.engine.TRUSTS_INFO_FILTER())\n\n ATTRIBUTE_TRANSLATION = {\n \"trustDirection\": {\n 0x00000003: \"bidirectional\",\n 0x00000002: \"outbound\",\n 0x00000001: \"inbound\",\n 0x00000000: \"disabled\",\n },\n \"trustType\": {\n 0x00000001: \"Non running Windows domain\",\n 0x00000002: \"Windows domain running Active Directory\",\n 0x00000003: \"Non Windows domain\",\n },\n }\n\n trusts = []\n for result in results:\n for key in ATTRIBUTE_TRANSLATION:\n if key in result:\n result[key] = ATTRIBUTE_TRANSLATION[key][int(result[key])]\n trusts.append(result)\n\n if verbose:\n self.display(results, verbose)\n return\n\n FIELDS_TO_PRINT = [\n \"dn\",\n \"cn\",\n \"securityIdentifier\",\n \"name\",\n \"trustDirection\",\n \"trustPartner\",\n \"trustType\",\n \"trustAttributes\",\n \"flatName\"\n ]\n\n for result in trusts:\n for field in FIELDS_TO_PRINT:\n if field in result:\n val = result[field]\n print(\"{field}: {val}\".format(field=field, val=val))\n print(\"\")",
"def all_targets(self):\n return self._combined_all_versioned_targets.targets",
"def GetTargets(self):\n return []",
"def GetRunTargets(self):\n return list(self._run_target_index.keys())",
"def test_get_trusts_all(self):\n # Simple function that can be used for cleanup\n def set_scope(auth_provider, scope):\n auth_provider.scope = scope\n\n self.create_trust()\n # Listing trusts can be done by trustor, by trustee, or without\n # any filter if scoped to a project, so we must ensure token scope is\n # project for this test.\n original_scope = self.os_admin.auth_provider.scope\n set_scope(self.os_admin.auth_provider, 'project')\n self.addCleanup(set_scope, self.os_admin.auth_provider, original_scope)\n trusts_get = self.trusts_client.list_trusts()['trusts']\n trusts = [t for t in trusts_get\n if t['id'] == self.trust_id]\n self.assertEqual(1, len(trusts))\n self.validate_trust(trusts[0], summary=True)",
"def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets",
"def edit_targets(\n self, rolename: str = Targets.type\n ) -> Generator[Targets, None, None]:\n with self.edit(rolename) as targets:\n if not isinstance(targets, Targets):\n raise RuntimeError(f\"Unexpected targets ({rolename}) type\")\n yield targets",
"def get_external_targets(cohesity_client):\n external_target_list = cohesity_client.vaults.get_vaults()\n for target in external_target_list:\n # config[target.name] = dict()\n if target.config.amazon:\n config_dict[target.name] = [\"secret_access_key\"]\n elif target.config.azure:\n config_dict[target.name] = [\"storage_access_key\"]\n else:\n config_dict[target.name] = None\n exported_res_dict[\"External Targets\"].append(target.name)\n return external_target_list",
"def all_versioned_targets(self):\n return self._all_versioned_targets",
"def _get_installed_targets(target_types):\r\n lines = [TargetsHelp.INSTALLED_TARGETS_HEADER]\r\n for target_type in sorted(target_types.keys()):\r\n if target_types[target_type].__doc__ is None:\r\n desc = 'Description unavailable.'\r\n else:\r\n desc = target_types[target_type].__doc__.split('\\n')[0]\r\n lines.append(' %s: %s' % (\r\n TargetsHelp.TARGET_TO_ALIAS[target_type].rjust(TargetsHelp.MAX_ALIAS_LEN), desc))\r\n return lines",
"def get_targets(self):\n\t\treturn self.prDoc['inputs']['data'][0]['targets']",
"def ProduceTargets(self):\n\n if self.completion_wanted:\n return self._FindTarget()\n else:\n return []",
"def invalid_targets(self):\n return self._combined_invalid_versioned_targets.targets",
"def get_trust_roles(self, trust_id):\n resp, body = self.get(\"OS-TRUST/trusts/%s/roles\" % trust_id)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])",
"def get_target_list(data_dir):\n target_list = os.listdir(data_dir)\n\n return target_list",
"def target(self) -> list[str]:\n if self._target is None:\n print(self.__class__.target.__doc__)\n raise SilSubProblemError(\n \"The *target* property has not been set (see above).\"\n )\n return self._target",
"def get_trusted(self, master_id):\r\n return self._handler.get_trusted(master_id)",
"def target_words(self) -> List[str]:\n return list(map(\n lambda w: self.spaces[w.lower()] \n if w.lower() in self.spaces else w.lower(), \n self.keywords\n ))",
"def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PlanTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def hittable_targets(self):\n return [self.current_level.getPlayer()]",
"def target_lang_titles(self):\n return self.target_lang_topics.keys()",
"def UserApplicationTrusts(self) -> ApplicationTrustCollection:",
"def get_deployment_roles(plan_name):\n pass",
"def get_all_targets():\n\n json_data = gtop.get_json_from_gtop(\"targets\")\n return [Target(t) for t in json_data]"
] |
[
"0.6885515",
"0.673864",
"0.5896159",
"0.58832645",
"0.5720503",
"0.5620511",
"0.55180293",
"0.55109847",
"0.5488529",
"0.54017466",
"0.5373725",
"0.5344688",
"0.5327276",
"0.5268317",
"0.5267869",
"0.52067995",
"0.51624995",
"0.51441246",
"0.5109124",
"0.50881356",
"0.50795823",
"0.5067452",
"0.50571096",
"0.5047449",
"0.5035605",
"0.5029209",
"0.49938604",
"0.49936533",
"0.49526978",
"0.4929311"
] |
0.6902802
|
0
|
Return the target file information for 'target_filepath'.
|
def target(self, target_filepath):
# Does 'target_filepath' have the correct format?
# Raise 'tuf.FormatError' if there is a mismatch.
tuf.formats.RELPATH_SCHEMA.check_match(target_filepath)
  # DM FIX for class code: percent-decode the requested path (e.g. '%20' -> ' ')
  # before looking it up in the targets metadata.
  import urllib
  target_filepath = urllib.unquote(target_filepath)
# Get target by looking at roles in order of priority tags.
target = self._preorder_depth_first_walk(target_filepath)
# Raise an exception if the target information could not be retrieved.
if target is None:
message = target_filepath+' not found.'
logger.error(message)
raise tuf.UnknownTargetError(message)
# Otherwise, return the found target.
else:
return target
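
A hedged sketch of how this lookup is typically paired with download_target() (shown elsewhere in this document); 'updater' is assumed to be an already-initialised client instance, and the target path and destination directory are placeholders:

# Resolve the trusted metadata for one target file, then fetch and verify it.
target_info = updater.target('packages/app-1.0.tgz')
updater.download_target(target_info, '/tmp/downloads')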
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_target_file(self, target_filepath, compressed_file_length,\n uncompressed_file_hashes):\n\n def verify_uncompressed_target_file(target_file_object):\n # Every target file must have its length and hashes inspected.\n self.__hard_check_compressed_file_length(target_file_object,\n compressed_file_length)\n self.__check_hashes(target_file_object, uncompressed_file_hashes)\n\n return self.__get_file(target_filepath, verify_uncompressed_target_file,\n 'target', compressed_file_length,\n download_safely=True, compression=None)",
"def _get_target_hash(self, target_filepath, hash_function='sha256'):\n\n # Calculate the hash of the filepath to determine which bin to find the \n # target. The client currently assumes the repository uses\n # 'hash_function' to generate hashes.\n\n digest_object = tuf.hash.digest(hash_function)\n\n try:\n digest_object.update(target_filepath)\n except UnicodeEncodeError:\n # Sometimes, there are Unicode characters in target paths. We assume a\n # UTF-8 encoding and try to hash that.\n digest_object = tuf.hash.digest(hash_function)\n encoded_target_filepath = target_filepath.encode('utf-8')\n digest_object.update(encoded_target_filepath)\n\n target_filepath_hash = digest_object.hexdigest() \n\n return target_filepath_hash",
"def _get_target_from_targets_role(self, role_name, targets, target_filepath):\n\n target = None\n\n # Does the current role name have our target?\n logger.debug('Asking role '+repr(role_name)+' about target '+\\\n repr(target_filepath))\n for filepath, fileinfo in targets.iteritems():\n if filepath == target_filepath:\n logger.debug('Found target '+target_filepath+' in role '+role_name)\n target = {'filepath': filepath, 'fileinfo': fileinfo}\n break\n else:\n logger.debug('No target '+target_filepath+' in role '+role_name)\n\n return target",
"def get_target_info():\n target_binary = get_target_binary()\n target_platform, target_architecture = get_target_platform(target_binary)\n target_type = get_target_type()\n return target_binary, target_platform, target_architecture, target_type",
"def _targetFile(self):\n basename = os.path.basename(self.src)\n filename = os.path.join(self.target_dir, basename)\n return open(filename, 'w')",
"def targets_infos(self) -> Dict[str, MetaFile]:\n raise NotImplementedError",
"def get_target_binary():\n file_location = prompt_base(\"where is the file located?\")\n file_location = os.path.abspath(file_location)\n return file_location",
"def get_target_info(\n targetId: Optional[TargetID] = None,\n) -> Generator[dict, dict, TargetInfo]:\n response = yield {\n \"method\": \"Target.getTargetInfo\",\n \"params\": filter_none({\"targetId\": str(targetId) if targetId else None}),\n }\n return TargetInfo.from_json(response[\"targetInfo\"])",
"def _save_target_info(self):\n \n #make sure the file exists\n path = self.communicator.image_store.project_path + \\\n self.target_file_name\n fout = open(path, 'w')\n\n print str(1)\n print str(len(self.target_list)-1)\n for i in range(1, len(self.target_list)):\n fout.write(self.target_list[i].format_info())\n fout.write(\"\\n\\n\")\n fout.close()",
"def get_target(self, target_id: str) -> dict:\n url = GET_SINGLE_TARGET_URI.format(api_version=self.api_version, target_id=target_id)\n response = self.rc.execute(\"GET\",\n self._get_uri(url),\n headers=self.header,\n verify=self.verify)\n return response.json()",
"def get_target(self, ):\n return self.get_parameter('target')",
"def get_target_info(target):\n corners = get_corner_points(target[0])\n return cv2.solvePnP(OBJECT_POINTS, np.array(corners, dtype=\"double\"),\n CAMERA_MATRIX, CAMERA_DIST_COEFFS, flags=cv2.SOLVEPNP_ITERATIVE)",
"def filepath(self):\n return self.filepath_",
"def get_target(self):\n return self._target",
"def co_filename(self):\n assert (\n self.filename is not None\n ), \"co_filename requires Target created from filename\"\n return self.filename.strpath",
"def process_target(self):\n return self.target",
"def __gather_file_info_win(self, file):\n if self.debug:\n Console.auto_line(f\"[*] Gathering binary information: '{file}'\")\n self.target_info = {}\n with open(file, 'rb') as binary:\n binary.seek(int('3C', 16))\n self.target_info['buffer'] = 0\n self.target_info['JMPtoCodeAddress'] = 0\n self.target_info['dis_frm_pehdrs_sectble'] = 248\n self.target_info['pe_header_location'] = struct.unpack('<i', binary.read(4))[0]\n # Start of COFF\n self.target_info['COFF_Start'] = self.target_info['pe_header_location'] + 4\n binary.seek(self.target_info['COFF_Start'])\n self.target_info['MachineType'] = struct.unpack('<H', binary.read(2))[0]\n binary.seek(self.target_info['COFF_Start'] + 2, 0)\n self.target_info['NumberOfSections'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['TimeDateStamp'] = struct.unpack('<I', binary.read(4))[0]\n binary.seek(self.target_info['COFF_Start'] + 16, 0)\n self.target_info['SizeOfOptionalHeader'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['Characteristics'] = struct.unpack('<H', binary.read(2))[0]\n # End of COFF\n self.target_info['OptionalHeader_start'] = self.target_info['COFF_Start'] + 20\n\n # if self.target_info['SizeOfOptionalHeader']:\n # Begin Standard Fields section of Optional Header\n binary.seek(self.target_info['OptionalHeader_start'])\n self.target_info['Magic'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MajorLinkerVersion'] = struct.unpack(\"!B\", binary.read(1))[0]\n self.target_info['MinorLinkerVersion'] = struct.unpack(\"!B\", binary.read(1))[0]\n self.target_info['SizeOfCode'] = struct.unpack(\"<I\", binary.read(4))[0]\n self.target_info['SizeOfInitializedData'] = struct.unpack(\"<I\", binary.read(4))[0]\n self.target_info['SizeOfUninitializedData'] = struct.unpack(\"<I\",\n binary.read(4))[0]\n self.target_info['AddressOfEntryPoint'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['PatchLocation'] = self.target_info['AddressOfEntryPoint']\n self.target_info['BaseOfCode'] = struct.unpack('<I', binary.read(4))[0]\n if self.target_info['Magic'] != 0x20B:\n self.target_info['BaseOfData'] = struct.unpack('<I', binary.read(4))[0]\n # End Standard Fields section of Optional Header\n # Begin Windows-Specific Fields of Optional Header\n if self.target_info['Magic'] == 0x20B:\n self.target_info['ImageBase'] = struct.unpack('<Q', binary.read(8))[0]\n else:\n self.target_info['ImageBase'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SectionAlignment'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['FileAlignment'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['MajorOperatingSystemVersion'] = struct.unpack('<H',\n binary.read(2))[0]\n self.target_info['MinorOperatingSystemVersion'] = struct.unpack('<H',\n binary.read(2))[0]\n self.target_info['MajorImageVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MinorImageVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MajorSubsystemVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MinorSubsystemVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['Win32VersionValue'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfImageLoc'] = binary.tell()\n self.target_info['SizeOfImage'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfHeaders'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['CheckSum'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['Subsystem'] = struct.unpack('<H', 
binary.read(2))[0]\n self.target_info['DllCharacteristics'] = struct.unpack('<H', binary.read(2))[0]\n if self.target_info['Magic'] == 0x20B:\n self.target_info['SizeOfStackReserve'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['SizeOfStackCommit'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['SizeOfHeapReserve'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['SizeOfHeapCommit'] = struct.unpack('<Q', binary.read(8))[0]\n\n else:\n self.target_info['SizeOfStackReserve'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfStackCommit'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfHeapReserve'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfHeapCommit'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['LoaderFlags'] = struct.unpack('<I', binary.read(4))[0] # zero\n self.target_info['NumberofRvaAndSizes'] = struct.unpack('<I', binary.read(4))[0]\n # End Windows-Specific Fields of Optional Header\n # Begin Data Directories of Optional Header\n self.target_info['ExportTableRVA'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ExportTableSize'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ImportTableLOCInPEOptHdrs'] = binary.tell()\n # ImportTable SIZE|LOC\n self.target_info['ImportTableRVA'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ImportTableSize'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ResourceTable'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['ExceptionTable'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['CertTableLOC'] = binary.tell()\n self.target_info['CertLOC'] = struct.unpack(\"<I\", binary.read(4))[0]\n self.target_info['CertSize'] = struct.unpack(\"<I\", binary.read(4))[0]\n if self.debug:\n Console.auto_line(f\"[+] Information successfully recovered\")",
"def target(self):\n return self._properties.get('target')",
"def filepath(self):\n return self._filepath.path",
"def __targetFilePath(self, fileSource=\"archive\", contentType=\"model\", formatType=\"pdbx\", version=\"latest\", partitionNumber=\"1\", mileStone=None):\n try:\n if fileSource == \"session\" and self.__inputSessionPath is not None:\n self.__pI.setSessionPath(self.__inputSessionPath)\n fP = self.__pI.getFilePath(\n dataSetId=self.__depDataSetId,\n wfInstanceId=self.__wfInstanceId,\n contentType=contentType,\n formatType=formatType,\n fileSource=fileSource,\n versionId=version,\n partNumber=partitionNumber,\n mileStone=mileStone,\n )\n dN, fN = os.path.split(fP)\n return fP, dN, fN\n except Exception as e:\n if self.__debug:\n self.__lfh.write(\n \"+DataExchange.__targetFilePath() failing for data set %s instance %s file source %s error %r\\n\"\n % (self.__depDataSetId, self.__wfInstanceId, self.__fileSource, str(e))\n )\n traceback.print_exc(file=self.__lfh)\n\n return (None, None, None)",
"def download_target(self, target, destination_directory):\n\n # Do the arguments have the correct format? \n # This check ensures the arguments have the appropriate \n # number of objects and object types, and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if the check fail.\n tuf.formats.TARGETFILE_SCHEMA.check_match(target)\n tuf.formats.PATH_SCHEMA.check_match(destination_directory)\n\n # Extract the target file information.\n target_filepath = target['filepath']\n trusted_length = target['fileinfo']['length']\n trusted_hashes = target['fileinfo']['hashes']\n\n # get_target_file checks every mirror and returns the first target\n # that passes verification.\n target_file_object = self.get_target_file(target_filepath, trusted_length,\n trusted_hashes)\n \n # We acquired a target file object from a mirror. Move the file into\n # place (i.e., locally to 'destination_directory').\n destination = os.path.join(destination_directory, target_filepath)\n destination = os.path.abspath(destination)\n target_dirpath = os.path.dirname(destination)\n if target_dirpath:\n try:\n os.makedirs(target_dirpath)\n except OSError, e:\n if e.errno == errno.EEXIST: pass\n else: raise\n else:\n logger.warn(str(target_dirpath)+' does not exist.')\n\n target_file_object.move(destination)",
"def target(self) -> Optional[str]:\n return pulumi.get(self, \"target\")",
"def filepath(self):\n return self.file.path",
"def get_target(self, target_id):\r\n return self._db(self._db.target.id==target_id).select().first()",
"def filepath(self):\n return self._filepath",
"def filepath(self):\n return self._filepath",
"def target_resource_path(self) -> Optional[str]:\n return pulumi.get(self, \"target_resource_path\")",
"def target(self) -> np.ndarray:\n return self._dist['target']",
"def path(self, target):\n return self.get_paths(target, use_edges=False, downwards=True)[0]",
"def get_source_file(self):\n return self.get_attribute(\"source_file\")"
] |
[
"0.63364285",
"0.62955815",
"0.6287654",
"0.5937286",
"0.5810763",
"0.5805691",
"0.5794956",
"0.56686133",
"0.56082475",
"0.555415",
"0.5541651",
"0.55204785",
"0.5468416",
"0.5457092",
"0.54542214",
"0.545371",
"0.5453521",
"0.5447208",
"0.5433085",
"0.5430546",
"0.54235846",
"0.5407526",
"0.5385379",
"0.5378698",
"0.5367188",
"0.5367188",
"0.5336698",
"0.5319595",
"0.52984524",
"0.5296566"
] |
0.68005747
|
0
|
Determine whether the targets role with the given 'role_name' has the target with the name 'target_filepath'.
|
def _get_target_from_targets_role(self, role_name, targets, target_filepath):
target = None
# Does the current role name have our target?
logger.debug('Asking role '+repr(role_name)+' about target '+\
repr(target_filepath))
for filepath, fileinfo in targets.iteritems():
if filepath == target_filepath:
logger.debug('Found target '+target_filepath+' in role '+role_name)
target = {'filepath': filepath, 'fileinfo': fileinfo}
break
else:
logger.debug('No target '+target_filepath+' in role '+role_name)
return target
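
A minimal standalone sketch of the same lookup over a role's 'targets' dict (the sample data is hypothetical); since the metadata maps filepaths to fileinfo, a plain dict lookup is equivalent:

def find_target(targets, target_filepath):
  # Return the matching {'filepath': ..., 'fileinfo': ...} dict, or None.
  for filepath, fileinfo in targets.items():
    if filepath == target_filepath:
      return {'filepath': filepath, 'fileinfo': fileinfo}
  return None

sample_targets = {'packages/app-1.0.tgz': {'length': 1024, 'hashes': {'sha256': 'abc...'}}}
print(find_target(sample_targets, 'packages/app-1.0.tgz'))
print(find_target(sample_targets, 'missing.txt'))  # -> None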
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def role_exists(role_name, file_name):\n role_processed = sudo('[ -f %s ] && grep \"^%s$\" %s && echo \"yes\" || echo \"no\"' % (file_name, role_name, file_name))\n if role_processed == 'yes':\n return True\n else:\n return False",
"def _visit_child_role(self, child_role, target_filepath):\n\n child_role_name = child_role['name']\n child_role_paths = child_role.get('paths')\n child_role_path_hash_prefixes = child_role.get('path_hash_prefixes')\n # A boolean indicator that tell us whether 'child_role' has been delegated\n # the target with the name 'target_filepath'.\n child_role_is_relevant = False\n\n if child_role_path_hash_prefixes is not None:\n target_filepath_hash = self._get_target_hash(target_filepath)\n for child_role_path_hash_prefix in child_role_path_hash_prefixes:\n if target_filepath_hash.startswith(child_role_path_hash_prefix):\n child_role_is_relevant = True\n\n elif child_role_paths is not None:\n for child_role_path in child_role_paths:\n # A child role path may be a filepath or directory. The child\n # role 'child_role_name' is added if 'target_filepath' is located\n # under 'child_role_path'. Explicit filepaths are also added.\n prefix = os.path.commonprefix([target_filepath, child_role_path])\n if prefix == child_role_path:\n child_role_is_relevant = True\n\n else:\n # 'role_name' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' fields should not be missing,\n # so we raise a format error here in case they are both missing.\n raise tuf.FormatError(repr(child_role_name)+' has neither ' \\\n '\"paths\" nor \"path_hash_prefixes\"!')\n\n if child_role_is_relevant:\n logger.debug('Child role '+repr(child_role_name)+' has target '+\n repr(target_filepath))\n return child_role_name\n else:\n logger.debug('Child role '+repr(child_role_name)+\n ' does not have target '+repr(target_filepath))\n return None",
"def IsTarget(self, target_name):\n return target_name in self.GetTargets()",
"def matches(self, tgt_residence_dir: str) -> bool:",
"def target_exists(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except Exception as e:\n log.error(\"Exception checking if target exists: {} {}\".format(type(e), e))\n return False\n return target is not None",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def target(self, target_filepath):\n\n # Does 'target_filepath' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.RELPATH_SCHEMA.check_match(target_filepath)\n\n # DM FIX for class code\n import urllib\n target_filepath = urllib.unquote(target_filepath)\n\n # Get target by looking at roles in order of priority tags.\n target = self._preorder_depth_first_walk(target_filepath)\n\n # Raise an exception if the target information could not be retrieved.\n if target is None:\n message = target_filepath+' not found.'\n logger.error(message)\n raise tuf.UnknownTargetError(message)\n # Otherwise, return the found target.\n else:\n return target",
"def validate_matching_target_name(target_filename, target_obj, inventory_path):\n logger.debug(\"validating target name matches the name of yml file %s\", target_filename)\n try:\n target_name = target_obj[\"vars\"][\"target\"]\n except KeyError:\n error_message = (\n f'Target missing: target \"{target_filename}\" is missing parameters.kapitan.vars.target\\n'\n \"This parameter should be set to the target name\"\n )\n raise InventoryError(error_message)\n\n if target_filename != target_name:\n target_path = os.path.join(os.path.abspath(inventory_path), \"targets\")\n\n error_message = (\n f'Target \"{target_name}\" is missing the corresponding yml file in {target_path}\\n'\n \"Target name should match the name of the target yml file in inventory\"\n )\n raise InventoryError(error_message)",
"def can_target(name):\n return False",
"def __checkDestination(self):\n return os.path.exists(self.__targetPath)",
"def target_validation(target_name, action):\n json_data = read_file('presqt/specs/targets.json', True)\n for data in json_data:\n if data['name'] == target_name:\n if data[\"supported_actions\"][action] is False:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not support the action '{}'.\".format(target_name, action),\n status.HTTP_400_BAD_REQUEST)\n return True, data['infinite_depth']\n else:\n raise PresQTValidationError(\n \"PresQT Error: '{}' is not a valid Target name.\".format(target_name), status.HTTP_404_NOT_FOUND)",
"def has_role(self, role_name):\n\n return any(r.name == role_name for r in self.roles)",
"def has(self, target):\n return target in self.by_target",
"def has_target(self):\n return self._has_target",
"def has_role(self, role):\n return False",
"def has_role(role, nodename=None):\n return role in get_list('roles', nodename)",
"def has(self, target):\r\n return target in self.by_target",
"def lambda_function_has_iam_role(function_name: str, role_name: str) -> bool:\n lambda_function = aws_lambda.get_function(FunctionName=function_name)\n lambda_function_iam_role: list = lambda_function.get('Configuration').get('Role')\n return role_name in lambda_function_iam_role",
"def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")",
"def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")",
"def is_target_in(self, newtarget):\n from .utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n # Test if WCS \n if not self.has_wcs():\n print(\"WARNING: because there is no wcs solution, \"+\\\n \"I can't test the inclusion of the new astrotarget\")\n return True\n \n return self.wcs.coordsAreInImage(*newtarget.radec)",
"def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')",
"def check_role(role):\n return role in all_roles",
"def targets_of_role(self, rolename='targets'):\n \n # Does 'rolename' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.RELPATH_SCHEMA.check_match(rolename)\n\n self._refresh_targets_metadata(rolename)\n \n return self._targets_of_role(rolename, skip_refresh=True)",
"def has_target(self):\n return self.target is not None",
"def target_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"target_role\")",
"def is_runnable(self):\n \n if len(target_tasks) < 1: \n return False\n # check task names?\n \n if self.run_folder is None or \\\n not os.path.exists(self.run_folder) or \\\n not os.path.exists(os.path.join(self.run_folder, self.run_id, 'SampleSheet.csv')):\n return False\n \n return True",
"def hasRole():\n conn = iamConn()\n try:\n conn.get_role(getArgs().role_name)\n return True\n except boto.exception.BotoServerError:\n return False",
"def ShouldUse(self, target, request_data):\n if self._main_directory is None:\n self._ComputeMainDirectory(request_data)\n\n match = self.output_regex.search(target)\n if match is not None:\n self.completion_wanted = True\n else:\n self.completion_wanted = False\n return self.completion_wanted",
"def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False"
] |
[
"0.69350505",
"0.67440933",
"0.6718138",
"0.63257766",
"0.6301791",
"0.627432",
"0.60713816",
"0.5954152",
"0.59138453",
"0.5836825",
"0.5782234",
"0.5780698",
"0.5761034",
"0.57535905",
"0.574139",
"0.57165617",
"0.5704403",
"0.5700803",
"0.5692873",
"0.5692873",
"0.5643786",
"0.56302184",
"0.5623281",
"0.55804473",
"0.55669385",
"0.55628943",
"0.55540997",
"0.55285573",
"0.5498351",
"0.5476566"
] |
0.7351772
|
0
|
Determine whether the given 'child_role' has been delegated the target with the name 'target_filepath'. Ensure that we explore only delegated roles trusted with the target. We assume conservation of delegated paths in the complete tree of delegations. Note that the call to _ensure_all_targets_allowed in __verify_uncompressed_metadata_file should already ensure that all targets metadata is valid; i.e. that the targets signed by a delegatee are a proper subset of the targets delegated to it by the delegator. Nevertheless, we check it again here for performance and safety reasons.
|
def _visit_child_role(self, child_role, target_filepath):
child_role_name = child_role['name']
child_role_paths = child_role.get('paths')
child_role_path_hash_prefixes = child_role.get('path_hash_prefixes')
  # A boolean indicator that tells us whether 'child_role' has been delegated
# the target with the name 'target_filepath'.
child_role_is_relevant = False
if child_role_path_hash_prefixes is not None:
target_filepath_hash = self._get_target_hash(target_filepath)
for child_role_path_hash_prefix in child_role_path_hash_prefixes:
if target_filepath_hash.startswith(child_role_path_hash_prefix):
child_role_is_relevant = True
elif child_role_paths is not None:
for child_role_path in child_role_paths:
# A child role path may be a filepath or directory. The child
# role 'child_role_name' is added if 'target_filepath' is located
# under 'child_role_path'. Explicit filepaths are also added.
prefix = os.path.commonprefix([target_filepath, child_role_path])
if prefix == child_role_path:
child_role_is_relevant = True
else:
# 'role_name' should have been validated when it was downloaded.
# The 'paths' or 'path_hash_prefixes' fields should not be missing,
# so we raise a format error here in case they are both missing.
raise tuf.FormatError(repr(child_role_name)+' has neither ' \
'"paths" nor "path_hash_prefixes"!')
if child_role_is_relevant:
logger.debug('Child role '+repr(child_role_name)+' has target '+
repr(target_filepath))
return child_role_name
else:
logger.debug('Child role '+repr(child_role_name)+
' does not have target '+repr(target_filepath))
return None
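
A minimal standalone sketch of the two matching modes used above, explicit path prefixes and hashed-bin ('path_hash_prefixes') delegation; the paths and prefixes are made up for illustration, and SHA-256 is assumed, matching _get_target_hash's default:

import hashlib
import os.path

def matches_paths(target_filepath, child_role_paths):
  # A delegated path may be a directory or an explicit file; it is relevant
  # when it is a prefix of the requested target path.
  for child_role_path in child_role_paths:
    if os.path.commonprefix([target_filepath, child_role_path]) == child_role_path:
      return True
  return False

def matches_hash_prefixes(target_filepath, path_hash_prefixes):
  # Hashed-bin delegation: the hex digest of the target path selects the bin.
  digest = hashlib.sha256(target_filepath.encode('utf-8')).hexdigest()
  return any(digest.startswith(prefix) for prefix in path_hash_prefixes)

print(matches_paths('django/django-1.9.tgz', ['django/']))   # True
print(matches_paths('flask/flask-1.0.tgz', ['django/']))     # False
print(matches_hash_prefixes('django/django-1.9.tgz', ['0', 'f']))  # depends on the digest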
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')",
"def _get_target_from_targets_role(self, role_name, targets, target_filepath):\n\n target = None\n\n # Does the current role name have our target?\n logger.debug('Asking role '+repr(role_name)+' about target '+\\\n repr(target_filepath))\n for filepath, fileinfo in targets.iteritems():\n if filepath == target_filepath:\n logger.debug('Found target '+target_filepath+' in role '+role_name)\n target = {'filepath': filepath, 'fileinfo': fileinfo}\n break\n else:\n logger.debug('No target '+target_filepath+' in role '+role_name)\n\n return target",
"def matches(self, tgt_residence_dir: str) -> bool:",
"def target(self, target_filepath):\n\n # Does 'target_filepath' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.RELPATH_SCHEMA.check_match(target_filepath)\n\n # DM FIX for class code\n import urllib\n target_filepath = urllib.unquote(target_filepath)\n\n # Get target by looking at roles in order of priority tags.\n target = self._preorder_depth_first_walk(target_filepath)\n\n # Raise an exception if the target information could not be retrieved.\n if target is None:\n message = target_filepath+' not found.'\n logger.error(message)\n raise tuf.UnknownTargetError(message)\n # Otherwise, return the found target.\n else:\n return target",
"def IsTarget(self, target_name):\n return target_name in self.GetTargets()",
"def has_target(self):\n return self._has_target",
"def validate_target(new_data_folder, target_name, proposal_ref):\n # Don't need\n del proposal_ref\n\n validate_dict = {'Location': [], 'Error': [], 'Line number': []}\n\n # Check if there is any data to process\n target_path = os.path.join(new_data_folder, target_name)\n\n # Assume success...\n validated = True\n\n # A target directory must exist\n if not os.path.isdir(target_path):\n validate_dict = add_tset_warning(validate_dict, 'Folder',\n 'Folder does not match target name.'\n f' Expected \"{target_name}\".'\n f' Is the upload called \"{target_name}.zip\"?', 0)\n # No point in checking anything else if this check fails\n validated = False\n\n if validated:\n # An 'aligned' directory must exist\n aligned_path = os.path.join(target_path, 'aligned')\n if not os.path.isdir(aligned_path):\n validate_dict = add_tset_warning(validate_dict, 'Folder',\n 'No aligned folder present.'\n f' Expected \"{target_name}/{aligned_path}\"', 0)\n # No point in checking anything else if this check fails\n ok_so_far = False\n\n if validated:\n # A metadata.csv file must exist\n metadata_file = os.path.join(aligned_path, 'metadata.csv')\n if os.path.isfile(metadata_file):\n validated, validate_dict = check_metadata(metadata_file, validate_dict)\n else:\n validate_dict = add_tset_warning(validate_dict, 'File',\n 'No metedata file present.'\n f' Expected \"{target_name}/{aligned_path}/{metadata_file}\"', 0)\n validated = False\n\n return validated, validate_dict",
"def target_validation(target_name, action):\n json_data = read_file('presqt/specs/targets.json', True)\n for data in json_data:\n if data['name'] == target_name:\n if data[\"supported_actions\"][action] is False:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not support the action '{}'.\".format(target_name, action),\n status.HTTP_400_BAD_REQUEST)\n return True, data['infinite_depth']\n else:\n raise PresQTValidationError(\n \"PresQT Error: '{}' is not a valid Target name.\".format(target_name), status.HTTP_404_NOT_FOUND)",
"def target_exists(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except Exception as e:\n log.error(\"Exception checking if target exists: {} {}\".format(type(e), e))\n return False\n return target is not None",
"def is_targeted(self, targets):\n\n if targets:\n if isinstance(targets, str):\n # See if the string is a '|' separated list of targets.\n targets = targets.split('|')\n if len(targets) == 1:\n # There was no '|' so restore the original string.\n targets = targets[0]\n\n if isinstance(targets, str):\n # String targets can come from the project file (ie. the user)\n # and so need to be validated.\n if targets.startswith('!'):\n # Note that this assumes that the target is a platform\n # rather than an architecture. If this is incorrect then\n # it is a bug in the meta-data somewhere.\n platform = Platform.platform(targets[1:])\n covered = (self.platform is not platform)\n elif '-' in targets:\n architecture = Architecture.architecture(targets)\n covered = (self is architecture)\n else:\n platform = Platform.platform(targets)\n covered = (self.platform is platform)\n else:\n covered = (self.platform.name in targets)\n else:\n covered = True\n\n return covered",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def ShouldUse(self, target, request_data):\n if self._main_directory is None:\n self._ComputeMainDirectory(request_data)\n\n match = self.output_regex.search(target)\n if match is not None:\n self.completion_wanted = True\n else:\n self.completion_wanted = False\n return self.completion_wanted",
"def _import_delegations(self, parent_role):\n \n current_parent_metadata = self.metadata['current'][parent_role]\n \n if 'delegations' not in current_parent_metadata:\n return\n\n # This could be quite slow with a huge number of delegations.\n keys_info = current_parent_metadata['delegations'].get('keys', {})\n roles_info = current_parent_metadata['delegations'].get('roles', [])\n\n logger.debug('Adding roles delegated from '+repr(parent_role)+'.')\n \n # Iterate through the keys of the delegated roles of 'parent_role'\n # and load them.\n for keyid, keyinfo in keys_info.items():\n if keyinfo['keytype'] in ['rsa', 'ed25519']:\n key = tuf.keys.format_metadata_to_key(keyinfo)\n \n # We specify the keyid to ensure that it's the correct keyid\n # for the key.\n try:\n tuf.keydb.add_key(key, keyid)\n except tuf.KeyAlreadyExistsError:\n pass\n except (tuf.FormatError, tuf.Error), e:\n logger.exception('Failed to add keyid: '+repr(keyid)+'.')\n logger.error('Aborting role delegation for parent role '+parent_role+'.')\n raise\n else:\n logger.warn('Invalid key type for '+repr(keyid)+'.')\n continue\n\n # Add the roles to the role database.\n for roleinfo in roles_info:\n try:\n # NOTE: tuf.roledb.add_role will take care\n # of the case where rolename is None.\n rolename = roleinfo.get('name')\n logger.debug('Adding delegated role: '+str(rolename)+'.')\n tuf.roledb.add_role(rolename, roleinfo)\n except tuf.RoleAlreadyExistsError, e:\n logger.warn('Role already exists: '+rolename)\n except:\n logger.exception('Failed to add delegated role: '+rolename+'.')\n raise",
"def is_runnable(self):\n \n if len(target_tasks) < 1: \n return False\n # check task names?\n \n if self.run_folder is None or \\\n not os.path.exists(self.run_folder) or \\\n not os.path.exists(os.path.join(self.run_folder, self.run_id, 'SampleSheet.csv')):\n return False\n \n return True",
"def verify_as_target(self, message_handler):\n\n self.platform.verify_as_target(message_handler)",
"def has_target_href(self) -> bool:\n return self._target_href is not None",
"def __checkDestination(self):\n return os.path.exists(self.__targetPath)",
"def has_target(self):\n return self.target is not None",
"def is_target(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = (\n name for name in vars(Target) if not name.startswith(\"_\")\n )\n\n return all([hasattr(X, name) for name in require_attrs])",
"def can_target(name):\n return False",
"def _preorder_depth_first_walk(self, target_filepath):\n\n target = None\n current_metadata = self.metadata['current']\n role_names = ['targets']\n\n # Ensure the client has the most up-to-date version of 'targets.txt'.\n # Raise 'tuf.NoWorkingMirrorError' if the changed metadata cannot be successfully\n # downloaded and 'tuf.RepositoryError' if the referenced metadata is\n # missing. Target methods such as this one are called after the top-level\n # metadata have been refreshed (i.e., updater.refresh()).\n self._update_metadata_if_changed('targets')\n\n # Preorder depth-first traversal of the tree of target delegations.\n while len(role_names) > 0 and target is None:\n\n # Pop the role name from the top of the stack.\n role_name = role_names.pop(-1)\n\n # The metadata for 'role_name' must be downloaded/updated before\n # its targets, delegations, and child roles can be inspected.\n # self.metadata['current'][role_name] is currently missing.\n # _refresh_targets_metadata() does not refresh 'targets.txt', it\n # expects _update_metadata_if_changed() to have already refreshed it,\n # which this function has checked above.\n self._refresh_targets_metadata(role_name, include_delegations=False)\n\n role_metadata = current_metadata[role_name]\n targets = role_metadata['targets']\n delegations = role_metadata.get('delegations', {})\n child_roles = delegations.get('roles', [])\n target = self._get_target_from_targets_role(role_name, targets,\n target_filepath)\n\n if target is None:\n\n # Push children in reverse order of appearance onto the stack.\n # NOTE: This may be a slow operation if there are many delegated roles.\n for child_role in reversed(child_roles):\n child_role_name = self._visit_child_role(child_role, target_filepath)\n if child_role_name is None:\n logger.debug('Skipping child role '+repr(child_role_name))\n else:\n logger.debug('Adding child role '+repr(child_role_name))\n role_names.append(child_role_name)\n\n else:\n logger.debug('Found target in current role '+repr(role_name))\n\n return target",
"def __verify_uncompressed_metadata_file(self, metadata_file_object,\n metadata_role):\n\n metadata = metadata_file_object.read()\n try:\n metadata_signable = tuf.util.load_json_string(metadata)\n except Exception, exception:\n raise tuf.InvalidMetadataJSONError(exception)\n else:\n # Ensure the loaded 'metadata_signable' is properly formatted.\n tuf.formats.check_signable_object_format(metadata_signable)\n\n # Is 'metadata_signable' newer than the currently installed\n # version?\n current_metadata_role = self.metadata['current'].get(metadata_role)\n\n # Compare metadata version numbers. Ensure there is a current\n # version of the metadata role to be updated.\n if current_metadata_role is not None:\n current_version = current_metadata_role['version']\n downloaded_version = metadata_signable['signed']['version']\n if downloaded_version < current_version:\n raise tuf.ReplayedMetadataError(metadata_role, downloaded_version,\n current_version)\n\n # Reject the metadata if any specified targets are not allowed.\n if metadata_signable['signed']['_type'] == 'Targets':\n self._ensure_all_targets_allowed(metadata_role,\n metadata_signable['signed'])\n\n # Verify the signature on the downloaded metadata object.\n valid = tuf.sig.verify(metadata_signable, metadata_role)\n if not valid:\n raise tuf.BadSignatureError(metadata_role)",
"def matches(self, target):\n raise NotImplementedError()",
"def verify_as_target(self, message_handler):",
"def supported_target(self, target, message_handler):\n\n # This default implementation checks that the architectures are the\n # same.\n return target is self",
"async def event_is_target_of_relation(self, parent_id: str) -> bool:\n\n result = await self.db_pool.simple_select_one_onecol(\n table=\"event_relations\",\n keyvalues={\"relates_to_id\": parent_id},\n retcol=\"event_id\",\n allow_none=True,\n desc=\"event_is_target_of_relation\",\n )\n return result is not None",
"def _refresh_targets_metadata(self, rolename='targets', include_delegations=False):\n\n roles_to_update = []\n\n # See if this role provides metadata and, if we're including\n # delegations, look for metadata from delegated roles.\n role_prefix = rolename + '/'\n for metadata_path in self.metadata['current']['release']['meta'].keys():\n if metadata_path == rolename + '.txt':\n roles_to_update.append(metadata_path[:-len('.txt')])\n elif include_delegations and metadata_path.startswith(role_prefix):\n # Add delegated roles. Skip roles names containing compression\n # extensions.\n if metadata_path.endswith('.txt'): \n roles_to_update.append(metadata_path[:-len('.txt')])\n\n # Remove the 'targets' role because it gets updated when the targets.txt\n # file is updated in _update_metadata_if_changed('targets').\n if rolename == 'targets':\n try:\n roles_to_update.remove('targets')\n except ValueError:\n message = 'The Release metadata file is missing the targets.txt entry.'\n raise tuf.RepositoryError(message)\n \n # If there is nothing to refresh, we are done.\n if not roles_to_update:\n return\n\n # Sort the roles so that parent roles always come first.\n roles_to_update.sort()\n logger.debug('Roles to update: '+repr(roles_to_update)+'.')\n\n # Iterate through 'roles_to_update', load its metadata\n # file, and update it if it has changed.\n for rolename in roles_to_update:\n self._load_metadata_from_file('previous', rolename)\n self._load_metadata_from_file('current', rolename)\n\n self._update_metadata_if_changed(rolename)\n\n # Remove the role if it has expired.\n try:\n self._ensure_not_expired(rolename)\n except tuf.ExpiredMetadataError:\n tuf.roledb.remove_role(rolename)",
"def is_target_in(self, newtarget):\n from .utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n # Test if WCS \n if not self.has_wcs():\n print(\"WARNING: because there is no wcs solution, \"+\\\n \"I can't test the inclusion of the new astrotarget\")\n return True\n \n return self.wcs.coordsAreInImage(*newtarget.radec)",
"def isa_descendant_weight_of(self, target, weight):\n weights = self.get_descendant_weights()\n if weight.get_name() in weights:\n return target.get_name() in weights[weight.get_name()]\n else:\n raise Exception(\"weight %s is not connect to this node: %s\"\n %(weight, self))",
"def target_is_valid(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except:\n return False\n return target['state'] != \"invalid\""
] |
[
"0.71726364",
"0.57336676",
"0.554071",
"0.5227213",
"0.51975995",
"0.51790684",
"0.51129013",
"0.5097449",
"0.50932217",
"0.5032808",
"0.5009551",
"0.5002774",
"0.49467227",
"0.49132517",
"0.49037033",
"0.4885482",
"0.48389253",
"0.48268166",
"0.47559568",
"0.47556943",
"0.47552824",
"0.47516808",
"0.4748322",
"0.47246677",
"0.47118753",
"0.46971667",
"0.46962196",
"0.4661326",
"0.4660341",
"0.4632095"
] |
0.7743372
|
0
|
Compute the hash of 'target_filepath'. This is useful in conjunction with the "path_hash_prefixes" attribute in a delegated targets role, which tells us which paths it is implicitly responsible for.
|
def _get_target_hash(self, target_filepath, hash_function='sha256'):
    # Calculate the hash of the filepath to determine which bin to find the
    # target. The client currently assumes the repository uses
    # 'hash_function' to generate hashes.
    digest_object = tuf.hash.digest(hash_function)
    try:
        digest_object.update(target_filepath)
    except UnicodeEncodeError:
        # Sometimes, there are Unicode characters in target paths. We assume a
        # UTF-8 encoding and try to hash that.
        digest_object = tuf.hash.digest(hash_function)
        encoded_target_filepath = target_filepath.encode('utf-8')
        digest_object.update(encoded_target_filepath)
    target_filepath_hash = digest_object.hexdigest()
    return target_filepath_hash
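For illustration, the same digest can be reproduced with nothing but the standard library; this is only a sketch of the hashed-bin lookup idea (the client itself goes through tuf.hash), and the example path and prefix values below are made up.

import hashlib

def example_target_hash(target_filepath, hash_function='sha256'):
    # Hash the relative target path (UTF-8 encoded), mirroring what the
    # client computes before comparing against 'path_hash_prefixes'.
    digest_object = hashlib.new(hash_function)
    digest_object.update(target_filepath.encode('utf-8'))
    return digest_object.hexdigest()

# A delegated role implicitly covers the target if the digest starts with
# one of its declared prefixes (hypothetical prefix values shown here).
path_hash_prefixes = ['0', '8']
path_hash = example_target_hash('packages/example-1.0.tar.gz')
print(path_hash, any(path_hash.startswith(p) for p in path_hash_prefixes))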
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))",
"def get_hash(self, filepath):\n if (os.path.isfile(filepath) and not (\n os.path.islink(filepath) and self.ignorelinks)):\n file_hash = self.hashfile(open(filepath, 'rb'))\n else:\n file_hash = self.hashstring(filepath)\n if not self._increment_hash:\n self._increment_hash = file_hash\n else:\n self._increment_hash = self.hashstring(\n self._increment_hash + file_hash)\n return file_hash",
"def get_target_hash(self, target_id):\r\n try:\r\n return self._db(self._db.target.id==target_id).select().first().hashpass\r\n except:\r\n return False",
"def _calculate_hash(self, file_object):\n hasher = self.hashlib()\n for chunk in self.iterchunks(file_object):\n hasher.update(chunk)\n return hasher.hexdigest()",
"def __hash__(self):\n return hash(self._full_path)",
"def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()",
"def static_file_hash(filepath):\n hasher = hashlib.md5() # nosec: B303\n\n with contextlib.closing(open(filepath, 'rb')) as file:\n hasher.update(file.read())\n return hasher.hexdigest()",
"def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()",
"def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()",
"def compute(self):\n self.checksum = self.get_files_hashes_in_path()\n self.real_checksum = self.checksum\n # This appends the filename when checksum was made for a single file.\n # We need to get this when testing the consistency on the moment of\n # restore.\n if self.count == 1:\n self.checksum = self.real_checksum + os.path.basename(self.path)\n return self.checksum",
"def get_file_hash(self, filepath):\n if filepath not in self._file_hash_cache:\n self._file_hash_cache[filepath] = self.static_file_hash(filepath)\n return self._file_hash_cache[filepath]",
"def hash_from_path(self, toppath):\n for dirpath, _, filenames in os.walk(toppath):\n for fname in filenames:\n filepath = os.path.join(dirpath, fname)\n # Skip symlinks\n if os.path.islink(filepath):\n continue\n try:\n filehash = sha256sum_file(filepath)\n relpath = filepath.replace(toppath, '')\n self['hashes'][relpath] = [filehash]\n except (PermissionError, FileNotFoundError) as err:\n print(f'Unable to hash {filepath}: {err}')",
"def _get_tmp_file_path(self):\n return os.path.join(self.tmp_dir, self.hash)",
"def get_hash(self):\n source = \"\"\n for cell in self.original_cells:\n source += \"\\n\".join(get_source(cell))\n return hashlib.sha256(source.encode(\"utf-8\")).hexdigest()",
"def file_hash(filepath: Path):\n hsh = hashlib.sha256()\n b = bytearray(128 * 1024)\n mv = memoryview(b)\n with Path(filepath).open(\"rb\", buffering=0) as f:\n for n in iter(lambda: f.readinto(mv), 0):\n hsh.update(mv[:n])\n return hsh.hexdigest()",
"def calculate_target_path(self):\n self.path = self.game.find_path(self, self.target)\n if not self.path:\n print(f\"{self.name} can't path to {self.target.name} {self.target.x}, {self.target.y}\")\n self.broken_target(self.target)\n self.target = None",
"def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()",
"def generate_content_hash(source_path):\n\n sha256 = hashlib.sha256()\n\n if os.path.isdir(source_path):\n source_dir = source_path\n for source_file in list_files(source_dir):\n update_hash(sha256, source_dir, source_file)\n else:\n source_dir = os.path.dirname(source_path)\n source_file = source_path\n update_hash(sha256, source_dir, source_file)\n\n return sha256",
"def hash(self):\n return self.audio_file.hash() + self.transcript_file.hash()",
"def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()",
"def generate_hash(self):\r\n\r\n hash_list = []\r\n for root, dirs, files in os.walk(self.options['source']):\r\n for f in sorted([f for f in files if not f.startswith('.')]):\r\n hash_list.append(os.path.join(root, f))\r\n hash_list.append(str(os.path.getmtime(os.path.join(root, f))))\r\n hash_list = ''.join(hash_list)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(hash_list).hexdigest()\r\n return hashlib.sha1(hash_list.encode('utf-8')).hexdigest()",
"def hash(self):\n return self._obs_file.hash()",
"def co_filename(self):\n assert (\n self.filename is not None\n ), \"co_filename requires Target created from filename\"\n return self.filename.strpath",
"def hash(self):\n hash_properties = self.artifacts\n return hashlib.md5(','.join(hash_properties).encode()).hexdigest()",
"def get_file_hash(file_path):\n with open(file_path, 'rb') as f:\n file_name = os.path.basename(file_path)\n to_hash = f.read() + file_name.encode('utf-8')\n new_hash = hashlib.md5(to_hash).hexdigest()\n return new_hash",
"def file_hash(load, fnd):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = {}\n\n if \"saltenv\" not in load:\n return ret\n\n if \"path\" not in fnd or \"bucket\" not in fnd or not fnd[\"path\"]:\n return ret\n\n cached_file_path = _get_cached_file_name(\n fnd[\"bucket\"], load[\"saltenv\"], fnd[\"path\"]\n )\n\n if os.path.isfile(cached_file_path):\n ret[\"hsum\"] = salt.utils.hashutils.get_hash(cached_file_path)\n ret[\"hash_type\"] = \"md5\"\n\n return ret",
"def hash_of(self, arcpath) -> str:\n return self._records[arcpath].hash",
"def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest",
"def calculate_hash(self, include_md: bool = True) -> str:\n # sourcery skip: reintroduce-else, swap-if-else-branches, use-named-expression\n # BUF_SIZE is totally arbitrary,\n BUF_SIZE = 65536 * 16 # lets read stuff in 16 x 64kb chunks!\n\n file_hash = hashlib.sha1()\n # Stubs Only\n files = list((self.package_path).rglob(\"**/*.pyi\"))\n if include_md:\n files += (\n [self.package_path / \"LICENSE.md\"]\n + [self.package_path / \"README.md\"]\n # do not include [self.toml_file]\n )\n for file in sorted(files):\n # TODO: Extract function to allow for retry on file not found\n try:\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n file_hash.update(data)\n except FileNotFoundError:\n log.warning(f\"File not found {file}\")\n # ignore file not found errors to allow the hash to be created WHILE GIT / VIRUS SCANNERS HOLD LINGERING FILES\n return file_hash.hexdigest()",
"def get_hash(self):\n return hashlib.sha1(\"\".join(asset.get_hash() for asset in self._assets).encode(\"utf-8\")).hexdigest()"
] |
[
"0.66744316",
"0.63947934",
"0.6372364",
"0.62910396",
"0.626336",
"0.62408173",
"0.6168245",
"0.597555",
"0.597499",
"0.5905806",
"0.5832505",
"0.5783359",
"0.57262033",
"0.57071465",
"0.569763",
"0.5665912",
"0.565811",
"0.56297946",
"0.5606447",
"0.5602915",
"0.5594297",
"0.55532384",
"0.5538297",
"0.5525974",
"0.5517021",
"0.5485584",
"0.5479802",
"0.54687107",
"0.5467807",
"0.5452669"
] |
0.85003406
|
0
|
Remove any files that are in 'previous' but not 'current'. This ensures that a file removed from the repository is also removed locally. The targets of the 'targets' role and of all delegated roles are checked.
|
def remove_obsolete_targets(self, destination_directory):
    # Does 'destination_directory' have the correct format?
    # Raise 'tuf.FormatError' if there is a mismatch.
    tuf.formats.PATH_SCHEMA.check_match(destination_directory)
    # Iterate through the rolenames and verify whether the 'previous'
    # directory contains a target no longer found in 'current'.
    for role in tuf.roledb.get_rolenames():
        if role.startswith('targets'):
            if role in self.metadata['previous'] and self.metadata['previous'][role] != None:
                for target in self.metadata['previous'][role]['targets'].keys():
                    if target not in self.metadata['current'][role]['targets'].keys():
                        # 'target' is only in 'previous', so remove it.
                        logger.warn('Removing obsolete file: '+repr(target)+'.')
                        # Remove the file if it hasn't been removed already.
                        destination = os.path.join(destination_directory, target)
                        try:
                            os.remove(destination)
                        except OSError, e:
                            # If 'filename' already removed, just log it.
                            if e.errno == errno.ENOENT:
                                logger.info('File '+repr(destination)+' was already removed.')
                            else:
                                logger.error(str(e))
                        except Exception, e:
                            logger.error(str(e))
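The core of the method is a set difference between the previous and current target listings; below is a minimal standalone sketch of that idea, assuming plain iterables of relative paths rather than the updater's metadata store (the function name and arguments are illustrative, not part of the updater API).

import errno
import os

def remove_missing_targets(previous_targets, current_targets, destination_directory):
    # Delete every file recorded in the previous snapshot that no longer
    # appears in the current one; a file that is already gone is not an error.
    for relative_path in set(previous_targets) - set(current_targets):
        destination = os.path.join(destination_directory, relative_path)
        try:
            os.remove(destination)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise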
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_previous_files():\n def delete(root: Path):\n shutil.rmtree(root / 'output', ignore_errors=True)\n for p in root.iterdir():\n if str(p).endswith(('.log', 'jobs.csv', 'csv.lock', '.yaml')):\n p.unlink()\n\n delete(wt_registration_dir)\n delete(mut_registration_dir)",
"def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()",
"def _move_current_to_previous(self, metadata_role):\n\n # Get the 'current' and 'previous' full file paths for 'metadata_role'\n metadata_filepath = metadata_role + '.txt'\n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filepath)\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filepath)\n\n # Remove the previous path if it exists.\n if os.path.exists(previous_filepath):\n os.remove(previous_filepath)\n\n # Move the current path to the previous path. \n if os.path.exists(current_filepath):\n tuf.util.ensure_parent_dir(previous_filepath)\n os.rename(current_filepath, previous_filepath)",
"def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()",
"def create_remove_previous_sources_instructions_for_source(self, folder, source):\n\n retVal = AnonymousAccum()\n iid, source_path, source_type = source[0], source[1], source[2]\n if not source_path:\n log.warning(f\"\"\"empty previous source for {iid}\"\"\")\n return retVal\n\n to_remove_path = os.path.normpath(os.path.join(folder, source_path))\n\n if source_type == '!dir': # remove whole folder\n retVal += RmDir(to_remove_path)\n elif source_type == '!file': # remove single file\n retVal += RmFile(to_remove_path)\n elif source_type == '!dir_cont':\n raise Exception(f\"{iid} previous_sources cannot have tag !dir_cont\")\n\n return retVal",
"def remove_target_files(topdir):\n print 'begin to remove unused [target] dirs'\n os.popen('find %s -name \"target\" | xargs rm -rf' % topdir).read()",
"def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)",
"def __rm(files):\n # Open master file and read in lines\n master = open('master.tex', 'r')\n lines = master.readlines()\n master.close()\n\n # Remove lines from master.tex if they exist\n for file in files:\n # Ask if the user really wants to remove preamble.tex\n if 'preamble.tex' in file:\n if not milc.questions.yesno(f'Are you sure you want to remove {emph(file)}?'):\n continue\n if not os.path.isfile(file):\n cli.log.error(f'I can\\'t find {emph(file)}.')\n else:\n try:\n lines.remove('\\\\input{'\n f'{file}'\n '}\\n')\n cli.log.info(f'Removed {emph(file)} from {emph(\"master.tex\")}')\n\n # Delete files if --delete flag is on\n if cli.config.rm.delete:\n cli.log.info(f'Deleting {emph(file)}')\n os.remove(file)\n except:\n cli.log.error(f'I can\\'t find {emph(file)} in the {emph(\"master.tex\")} file.')\n\n # Write new master.tex\n master = open('master.tex', 'w')\n master.writelines(lines)\n master.close()",
"def remove_current_logs_and_mv_comp_files(to_move_files, files_to_be_moved):\n [os.remove(f\"{file_name}\") for file_name in to_move_files]\n [shutil.move(os.path.join(LOGS_PATH, file_name), DESTINATION) for file_name in files_to_be_moved]",
"def remove_target_files(self):\n\n LOGGER.info(f'start removing target files')\n count = 0\n for tfp in tqdm(list(self.target_fps), disable=self.disable_tqdm):\n tfp.unlink()\n self.target_fps.remove(tfp)\n count += 1\n LOGGER.info(f'removed total {count} target files, now total {len(self.target_fps)} target files exist')",
"def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return",
"def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n # get finished clean command logs of last update\n prev_cleaned = [\n x.file_name + '.TSV'\n for x in self.log_record.called.filter(\n command='cleancalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} files already cleaned.\".format(len(prev_cleaned)))\n # remove these from tsv_list\n tsv_list = [x for x in tsv_list if x not in prev_cleaned]\n\n # Loop through all the files in the source directory\n if self.verbosity:\n tsv_list = progress.bar(tsv_list)\n for name in tsv_list:\n call_command(\n \"cleancalaccessrawfile\",\n name,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n )",
"def remove_workflow(input_file_parameters, dir_stack):\r\n dirs_to_del = []\r\n files_to_del = 0\r\n for d in dir_stack[1:]:\r\n if not os.path.exists(d.path): continue\r\n dirs_to_del.append(d.path)\r\n files_to_del += len(os.listdir(d.path))\r\n if os.path.exists(input_file_parameters.output_dir):\r\n dirs_to_del.append(input_file_parameters.output_dir)\r\n files_to_del += len(os.listdir(input_file_parameters.output_dir))\r\n\r\n if not dirs_to_del:\r\n print 'Nothing to remove. This workflow has not been created yet.'\r\n return\r\n else:\r\n print 'Removing {0} directories containing {1} files.'.format(\r\n len(dirs_to_del), files_to_del)\r\n if raw_input('Continue?\\n(y/n): ') == 'y':\r\n print 'Removing...'\r\n try:\r\n for d in dirs_to_del:\r\n shutil.rmtree(d)\r\n except OSError as err:\r\n raise STAPLERerror.STAPLERerror('Unable to remove workflow. Reason:\\n'\r\n '{0}'.format(str(err)))\r\n print 'Done.'\r\n else:\r\n print 'Canceled.'",
"def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")",
"def remove(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()",
"def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)",
"def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)",
"def check_removed_files(store: dict[str, Any]) -> ValidationStepResult:\n labels: set[Label] = set()\n all_labels: dict[str, Label] = store[\"possible_labels\"]\n errors: dict[os.PathLike, list[str]] = {}\n deleted_files_in_hub_mirrored_dir: set[os.PathLike] = set()\n \n repository: Repository = store[\"repository\"]\n filtered_files: dict[PullRequestFileType, list[File]] = (\n store[\"filtered_files\"]\n )\n\n logger.info(\"Checking if the PR contains updates to existing forecasts/metadata...\")\n\n forecasts = filtered_files.get(PullRequestFileType.FORECAST, [])\n metadatas = filtered_files.get(PullRequestFileType.METADATA, [])\n removed_files: bool = False\n success: bool = True\n\n for forecast_file in forecasts:\n if forecast_file.status == \"removed\":\n existing_forecast_file = get_existing_forecast_file(\n repository,\n forecast_file,\n store[\"HUB_MIRRORED_DIRECTORY_ROOT\"]\n )\n if existing_forecast_file is not None:\n removed_files = True\n deleted_files_in_hub_mirrored_dir.add(existing_forecast_file)\n path = pathlib.Path(forecast_file.filename)\n errors[path] = [(\n \"The forecast CSV or metadata file is deleted.\"\n \"Please put the file back as we do not allow file deletion at the moment.\")]\n\n for metadata_file in metadatas:\n if metadata_file.status == \"removed\":\n existing_forecast_file = get_existing_forecast_file(\n repository,\n metadata_file,\n store[\"HUB_MIRRORED_DIRECTORY_ROOT\"]\n )\n if existing_forecast_file is not None:\n removed_files = True\n deleted_files_in_hub_mirrored_dir.add(existing_forecast_file)\n path = pathlib.Path(metadata_file.filename)\n errors[path] = [(\n \"The forecast CSV or metadata file is deleted. \"\n \"Please put the file back as we do not allow file deletion at the moment.\")]\n\n if removed_files:\n success = False\n logger.info(\"❌ PR deleted existing forecast/metadata file.\")\n labels.add(all_labels[\"file-deletion\"])\n\n else:\n logger.info(\"✔️ PR does not include file deletion.\")\n\n return ValidationStepResult(\n success=success,\n labels=labels,\n file_errors = errors,\n to_store={\n \"deleted_existing_files_paths\": deleted_files_in_hub_mirrored_dir\n }\n )",
"def delete_old():\n folder = '../build/data/vtk'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path) # subdirs\n except Exception as e:\n print(e)\n folder = '../build/log'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n # elif os.path.isdir(file_path): shutil.rmtree(file_path) #subdirs\n except Exception as e:\n print(e)",
"def remove_custom_installation(self):\n\n logger.info(\"Removing old customization\")\n for candidate in os.listdir(self.rundir):\n if candidate not in (\"config\", \"delta\"):\n candidate = os.path.join(self.rundir, candidate)\n try:\n shutil.rmtree(candidate)\n except NotADirectoryError:\n os.remove(candidate)",
"def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()",
"def unlink(self):\n\t\tadiff = ApplicationDifferencer()\n\n\t\t# Determine the differences between what's in the\n\t\t# application's directory and what's currently\n\t\t# available from the root filesystem (in relation\n\t\t# to this application).\n\t\tresults = adiff.scan(\n\t\t\t\tos.path.join(\n\t\t\t\t\tAppFolders.get(self.type),\n\t\t\t\t\tself.name + \"/\" + self.version\n\t\t\t\t\t),\n\t\t\t\tTrue\n\t\t\t\t);\n\t\t\n\t\tsafe_app_dir = os.path.join(\n AppFolders.get(self.type),\n self.name # We exclude the version here because we could be\n # checking against a link that's under Current or\n # a specific version.\n )\n\t\t\n\t\t# Preemptively go through the list of directories, removing those\n\t\t# that are symlinks to the application folder. This is from the legacy\n\t\t# link system and unfortunatly if you let the block below this run\n\t\t# through a system with said symlinks, you'll end up annihilating the\n\t\t# the application files (because it'll walk through the symlink into\n\t\t# the application directory and start rm'ing stuff we don't want to)\n\t\t# The solution here is to go through and remove directory symlinks before\n\t\t# hand, with a reversed result list (in effect reversing the walk process\n\t\t# in adiff.scan) so that we elimate the top level symlinks first, preventing\n\t\t# it from annihilating symlinked directories inside the application folder.\n\t\t# Very annoying stuff.\n\t\t#\n\t\t# XXX: I almost hosed the entire Elementary system with this. Apparently it\n\t\t# that removing symlinked directories included some of the base ones\n\t\t# such as /lib and /bin (because the Python install contains those dirs\n\t\t# too :P). The only_sub variable defines that only paths that resolve\n\t\t# to a *subdirectory* of those specified can be removed if it's a symlinked\n\t\t# directory. This prevents removal of /bin, /lib, etc.. symlinks.\n\t\t#\n\t\tonly_sub = [\n\t\t\t\t\"/System/Utilities/Applications\",\n\t\t\t\t\"/System/Utilities/Libraries\",\n\t\t\t\t\"/Applications\",\n\t\t\t\t\"/Users\"\n\t\t\t]\n\t\tresults.reverse()\n\t\ttrip_safety = False\n\t\tfor i in results:\n\t\t\t# Legacy removal is a special case because directories will be detected\n\t\t\t# as file entries (because they are symlinks). Therefore, we need to use\n\t\t\t# os.path.realpath and os.path.isdir to find out whether it's really a directory\n\t\t\t# or not.\n\t\t\tis_directory = os.path.isdir(os.path.realpath(i[2]))\n\n\t\t\t# Get file information.\n\t\t\ttry:\n\t\t\t\tpstat = os.lstat(i[2])[stat.ST_MODE]\n\t\t\texcept:\n\t\t\t\t# Likely broken when we removed a directory symlink.\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Determine whether we should proceed with this entry.\n\t\t\tif (not is_directory):\n\t\t\t\tcontinue\n\t\t\tif (not stat.S_ISLNK(pstat)):\n\t\t\t\tcontinue\n\n\t\t\t# Determine whether it's safe to remove this symlinked dir.\n\t\t\tif (not self.isApplicationOwned(i[2], safe_app_dir)):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Double-check before we go unlinking (in case of a logic oversight).\n\t\t\tif (is_directory and stat.S_ISLNK(pstat)):\n\t\t\t\ttrip_safety = True\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_unlink(i[2])\n\t\t\t\t\tlog.showWarningW(\"Removed symlinked directory at: \" + i[2])\n\t\t\t\t\tlog.showWarningW(\"The full path was: \" + rpath)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\tresults.reverse()\t\t\n\n\t\tif (trip_safety):\n\t\t\tlog.showErrorW(\"Legacy system safety switch was tripped. 
This indicates you have\")\n\t\t\tlog.showErrorO(\"symlinked directories on your system (from legacy linkage systems).\")\n\t\t\tlog.showErrorO(\"The unlinking process has removed at least one of those symlinked\")\n\t\t\tlog.showErrorO(\"directories. In order to make sure application files don't get\")\n\t\t\tlog.showErrorO(\"removed, you need to run the unlink process again to ensure the system\")\n\t\t\tlog.showErrorO(\"is scanned without symlinked directories. If the process shows this\")\n\t\t\tlog.showErrorO(\"message twice, then STOP and REMOVE THE SYMLINKS MANUALLY. You risk\")\n\t\t\tlog.showErrorO(\"destroying application installations if you continue.\")\n\t\t\tsys.exit(1)\n\t\t\n\n\t\t# Now go through the results, removing directories (if they're\n\t\t# empty) and un-symlinking files (but making sure that we only\n\t\t# remove symlinks and not normal files).\n\t\tattempt_successes = list()\n\t\tattempt_failures = list()\n\t\tattempt_notexists = list()\n\t\ttotal_files = 0\n\t\tfor i in results:\n\t\t\ttotal_files += 1\n\t\t\ttry:\n\t\t\t\tpstat = os.lstat(i[2])[stat.ST_MODE]\n\t\t\texcept:\n\t\t\t\t# File doesn't exist. Likely got removed while we unlinked\n\t\t\t\t# a `symlinked' directory (from old linkage system).\n\t\t\t\tcontinue\n\n\t\t\t# Check to make sure that the file we're going to remove is located\n\t\t\t# within a safe directory.\n\t\t\tif (not self.isApplicationOwned(i[2], safe_app_dir)):\n\t\t\t\t# This check only applies to symlinks, not real directories.\n\t\t\t\tif ((i[0] == \"file\" or i[0] == \"directory\") and stat.S_ISLNK(pstat)):\n\t\t\t\t\tlog.showInfoW(\"Ignoring \" + i[2] + \" because it's not owned by the application.\")\n\t\t\t\t\tcontinue\n\n\t\t\tif (i[0] == \"directory\" and not stat.S_ISLNK(pstat)):\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_rmdir(i[2])\n\t\t\t\t\tattempt_successes.append(i[2])\n\t\t\t\texcept:\n\t\t\t\t\tlog.showInfoW(\"Still in use: \" + i[2])\n\t\t\t\t\t# Failure to remove a directory should not be counted\n\t\t\t\t\t# as a failure since quite often directories will not be\n\t\t\t\t\t# removed because they are still in use by other applications.\n\t\t\t\t\t#attempt_failures.append(i[2])\n\t\t\telif ((i[0] == \"file\" or i[0] == \"directory\") and stat.S_ISLNK(pstat)):\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_unlink(i[2])\n\t\t\t\t\tattempt_successes.append(i[2])\n\t\t\t\texcept:\n\t\t\t\t\tlog.showErrorW(\"Unable to symlink file \" + i[2])\n\t\t\t\t\tattempt_failures.append(i[2])\n\t\t\telif (i[0] == \"notexists\"):\n\t\t\t\tlog.showInfoW(\" N \" + i[2])\n\t\t\t\tattempt_notexists.append(i[2])\n\t\t\telif (i[0] != \"notexists\" and i[0] != \"file\" and i[0] != \"directory\"):\n\t\t\t\tlog.showWarningW(\"Unknown operation for \" + i[1])\n\n\t\treturn attempt_successes, attempt_failures, total_files",
"def remove_frames(tmpdir, files):\n for fname in files: os.remove(os.path.join(tmpdir, fname))\n if not(tmpdir == None): os.rmdir(tmpdir)",
"def del_files(rels_fl, last_fl, path):\n for i in rels_fl:\n if i[:-5] not in last_fl:\n os.remove(f'{path}/{i}')\n return",
"def remove_tmp_sources(source_filename):\n logging.info('Removing temporary files ...')\n source_dir = os.path.dirname(source_filename)\n if os.path.exists(source_filename):\n os.remove(source_filename)\n for f in os.listdir(source_dir):\n if f.startswith('tmp_'):\n os.remove(os.path.join(source_dir, f))",
"def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)",
"def deleteIntermediateFiles(self):\n uniq_files = set(self.files_to_delete)\n print (\"Deleting %d intermediate files\" % len(uniq_files))\n for fn in uniq_files:\n # don't delete log files\n if not fn.endswith(\".log\"):\n os.remove(fn)",
"def remove_old_logs():\r\n three_days_old = dt.date.today() - dt.timedelta(days=2)\r\n three_days_ago = three_days_old.strftime('%Y%m%d')\r\n\r\n for f in os.listdir(ANCILS_DIR):\r\n if not f.startswith(('model_configs_latest.txt', 'model_configs-2019-11-02.txt')):\r\n file_date = f.strip('.txt').split('_')[2].replace(\"-\",\"\")\r\n\r\n if not file_date.endswith('01'):\r\n if int(file_date) < int(three_days_ago):\r\n cmd1 = \"git add {}\".format(os.path.join(ANCILS_DIR, f))\r\n subprocess.run(cmd1, shell=True)\r\n cmd = \"git rm -f {}\".format(os.path.join(ANCILS_DIR, f))\r\n subprocess.run(cmd, shell=True)",
"def _remove_changes(self):\n if os.path.exists(self.changes_file):\n os.remove(self.changes_file)",
"def clean_retrosheet_files(self):\n # Get zipped and unzipped folder names\n zippedFileFolder = Filepath.get_retrosheet_folder(folder='zipped')\n unzippedFileFolder = Filepath.get_retrosheet_folder(folder='unzipped')\n\n # Clean out all files in both folders\n for folder in (zippedFileFolder, unzippedFileFolder):\n os.chdir(folder)\n for file in os.listdir(os.getcwd()): \n if os.path.isdir(file): \n shutil.rmtree(file)\n else: \n os.remove(file)"
] |
[
"0.6814799",
"0.6449817",
"0.6353513",
"0.61904424",
"0.6107088",
"0.5952603",
"0.5881425",
"0.58807445",
"0.586403",
"0.58522594",
"0.5755024",
"0.5734811",
"0.5718269",
"0.5690973",
"0.56899154",
"0.56445503",
"0.5625026",
"0.5623838",
"0.5616736",
"0.5609417",
"0.5593584",
"0.55851",
"0.5579901",
"0.5555958",
"0.5539352",
"0.5530068",
"0.55285656",
"0.5525531",
"0.55212814",
"0.55176663"
] |
0.65018755
|
1
|
Return the targets in 'targets' that have changed. Targets are considered changed if they do not exist at 'destination_directory' or if the target located there has mismatched file properties (i.e., its hashes differ from the trusted fileinfo). The returned information is a list conformant to 'tuf.formats.TARGETFILES_SCHEMA'.
|
def updated_targets(self, targets, destination_directory):
    # Do the arguments have the correct format?
    # Raise 'tuf.FormatError' if there is a mismatch.
    tuf.formats.TARGETFILES_SCHEMA.check_match(targets)
    tuf.formats.PATH_SCHEMA.check_match(destination_directory)
    updated_targets = []
    for target in targets:
        # Get the target's filepath located in 'destination_directory'.
        # We will compare targets against this file.
        target_filepath = os.path.join(destination_directory, target['filepath'])
        # Try one of the algorithm/digest combos for a mismatch. We break
        # as soon as we find a mismatch.
        for algorithm, digest in target['fileinfo']['hashes'].items():
            digest_object = None
            try:
                digest_object = tuf.hash.digest_filename(target_filepath,
                                                         algorithm=algorithm)
            # This exception would occur if the target does not exist locally.
            except IOError:
                updated_targets.append(target)
                break
            # The file does exist locally, check if its hash differs.
            if digest_object.hexdigest() != digest:
                updated_targets.append(target)
                break
    return updated_targets
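The per-target check above boils down to "missing or hash mismatch means updated"; the following is a small sketch of that comparison using hashlib directly instead of tuf.hash, with an illustrative function name and a trusted_hashes dict of algorithm-to-hex-digest pairs as the assumed input.

import hashlib
import os

def target_needs_update(local_path, trusted_hashes, chunk_size=65536):
    # A target counts as updated if the local copy is missing or if any
    # trusted (algorithm, digest) pair fails to match the file on disk.
    if not os.path.isfile(local_path):
        return True
    for algorithm, trusted_digest in trusted_hashes.items():
        digest_object = hashlib.new(algorithm)
        with open(local_path, 'rb') as file_object:
            for chunk in iter(lambda: file_object.read(chunk_size), b''):
                digest_object.update(chunk)
        if digest_object.hexdigest() != trusted_digest:
            return True
    return False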
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def invalid_targets(self):\n return self._combined_invalid_versioned_targets.targets",
"def all_targets(self):\n return self._combined_all_versioned_targets.targets",
"def _sort_and_validate_targets(self, targets):\r\n # We must check the targets in this order, to ensure correctness if invalidate_dependents=True,\r\n # since we use earlier cache keys to compute later cache keys in this case.\r\n ordered_targets = self._order_target_list(targets)\r\n\r\n # This will be a list of VersionedTargets that correspond to @targets.\r\n versioned_targets = []\r\n\r\n # This will be a mapping from each target to its corresponding VersionedTarget.\r\n versioned_targets_by_target = {}\r\n\r\n # Map from id to current fingerprint of the target with that id. We update this as we iterate,\r\n # in topological order, so when handling a target, this will already contain all its deps (in\r\n # this round).\r\n id_to_hash = {}\r\n\r\n for target in ordered_targets:\r\n dependency_keys = set()\r\n if self._invalidate_dependents and hasattr(target, 'dependencies'):\r\n # Note that we only need to do this for the immediate deps, because those will already\r\n # reflect changes in their own deps.\r\n for dep in target.dependencies:\r\n # We rely on the fact that any deps have already been processed, either in an earlier\r\n # round or because they came first in ordered_targets.\r\n # Note that only external deps (e.g., JarDependency) or targets with sources can\r\n # affect invalidation. Other targets (JarLibrary, Pants) are just dependency scaffolding.\r\n if isinstance(dep, ExternalDependency):\r\n dependency_keys.add(dep.cache_key())\r\n elif isinstance(dep, TargetWithSources):\r\n fprint = id_to_hash.get(dep.id, None)\r\n if fprint is None:\r\n # It may have been processed in a prior round, and therefore the fprint should\r\n # have been written out by the invalidator.\r\n fprint = self._invalidator.existing_hash(dep.id)\r\n # Note that fprint may still be None here. E.g., a codegen target is in the list\r\n # of deps, but its fprint is not visible to our self._invalidator (that of the\r\n # target synthesized from it is visible, so invalidation will still be correct.)\r\n #\r\n # Another case where this can happen is a dep of a codegen target on, say,\r\n # a java target that hasn't been built yet (again, the synthesized target will\r\n # depend on that same java target, so invalidation will still be correct.)\r\n # TODO(benjy): Make this simpler and more obviously correct.\r\n if fprint is not None:\r\n dependency_keys.add(fprint)\r\n elif isinstance(dep, JarLibrary) or isinstance(dep, Pants):\r\n pass\r\n else:\r\n raise ValueError('Cannot calculate a cache_key for a dependency: %s' % dep)\r\n cache_key = self._key_for(target, dependency_keys)\r\n id_to_hash[target.id] = cache_key.hash\r\n\r\n # Create a VersionedTarget corresponding to @target.\r\n versioned_target = VersionedTarget(self, target, cache_key)\r\n\r\n # Add the new VersionedTarget to the list of computed VersionedTargets.\r\n versioned_targets.append(versioned_target)\r\n\r\n # Add to the mapping from Targets to VersionedTargets, for use in hooking up VersionedTarget\r\n # dependencies below.\r\n versioned_targets_by_target[target] = versioned_target\r\n\r\n # Having created all applicable VersionedTargets, now we build the VersionedTarget dependency\r\n # graph, looking through targets that don't correspond to VersionedTargets themselves.\r\n versioned_target_deps_by_target = {}\r\n\r\n def get_versioned_target_deps_for_target(target):\r\n # For every dependency of @target, we will store its corresponding VersionedTarget here. For\r\n # dependencies that don't correspond to a VersionedTarget (e.g. 
pass-through dependency\r\n # wrappers), we will resolve their actual dependencies and find VersionedTargets for them.\r\n versioned_target_deps = set([])\r\n if hasattr(target, 'dependencies'):\r\n for dep in target.dependencies:\r\n for dependency in dep.resolve():\r\n if dependency in versioned_targets_by_target:\r\n # If there exists a VersionedTarget corresponding to this Target, store it and\r\n # continue.\r\n versioned_target_deps.add(versioned_targets_by_target[dependency])\r\n elif dependency in versioned_target_deps_by_target:\r\n # Otherwise, see if we've already resolved this dependency to the VersionedTargets it\r\n # depends on, and use those.\r\n versioned_target_deps.update(versioned_target_deps_by_target[dependency])\r\n else:\r\n # Otherwise, compute the VersionedTargets that correspond to this dependency's\r\n # dependencies, cache and use the computed result.\r\n versioned_target_deps_by_target[dependency] = get_versioned_target_deps_for_target(\r\n dependency)\r\n versioned_target_deps.update(versioned_target_deps_by_target[dependency])\r\n\r\n # Return the VersionedTarget dependencies that this target's VersionedTarget should depend on.\r\n return versioned_target_deps\r\n\r\n # Initialize all VersionedTargets to point to the VersionedTargets they depend on.\r\n for versioned_target in versioned_targets:\r\n versioned_target.dependencies = get_versioned_target_deps_for_target(versioned_target.target)\r\n\r\n return versioned_targets",
"def check_dependency_change(targets: List[str], dependencies: List[str]) -> bool:\n min_target_mtime = min([get_mtime(path) for path in targets])\n max_dep_mtime = max([get_mtime(path) for path in dependencies])\n return max_dep_mtime > min_target_mtime",
"def all_versioned_targets(self):\n return self._all_versioned_targets",
"def metadata_update_targets(targets):\n filenames = []\n for target in targets:\n if target == 'stable':\n filename = _generate_metadata_kind('firmware.xml.gz', targets=['stable'])\n filenames.append(filename)\n elif target == 'testing':\n filename = _generate_metadata_kind('firmware-testing.xml.gz', targets=['stable', 'testing'])\n filenames.append(filename)\n\n # return all the files we have to sign\n return filenames",
"def _targets_of_role(self, rolename, targets=None, skip_refresh=False):\n\n if targets is None:\n targets = []\n\n logger.debug('Getting targets of role: '+repr(rolename)+'.')\n\n if not tuf.roledb.role_exists(rolename):\n raise tuf.UnknownRoleError(rolename)\n\n # We do not need to worry about the target paths being trusted because\n # this is enforced before any new metadata is accepted.\n if not skip_refresh:\n self._refresh_targets_metadata(rolename)\n \n # Do we have metadata for 'rolename'?\n if rolename not in self.metadata['current']:\n message = 'No metadata for '+rolename+'. Unable to determine targets.'\n logger.debug(message)\n return targets\n\n # Get the targets specified by the role itself.\n for filepath, fileinfo in self.metadata['current'][rolename]['targets'].items():\n new_target = {} \n new_target['filepath'] = filepath \n new_target['fileinfo'] = fileinfo\n \n targets.append(new_target)\n\n return targets",
"def changed_targets(inventory_path, output_path):\n targets = []\n inv = inventory_reclass(inventory_path)\n\n saved_inv_cache = None\n saved_inv_cache_path = os.path.join(output_path, \"compiled/.kapitan_cache\")\n if os.path.exists(saved_inv_cache_path):\n with open(saved_inv_cache_path, \"r\") as f:\n try:\n saved_inv_cache = yaml.safe_load(f)\n except Exception:\n raise CompileError(\"Failed to load kapitan cache: %s\", saved_inv_cache_path)\n\n targets_list = list(inv[\"nodes\"])\n\n # If .kapitan_cache doesn't exist or failed to load, recompile all targets\n if not saved_inv_cache:\n return targets_list\n else:\n for key, hash in cached.inv_cache[\"folder\"].items():\n try:\n if hash != saved_inv_cache[\"folder\"][key]:\n logger.debug(\"%s folder hash changed, recompiling all targets\", key)\n return targets_list\n except KeyError:\n # Errors usually occur when saved_inv_cache doesn't contain a new folder\n # Recompile anyway to be safe\n return targets_list\n\n for target in targets_list:\n try:\n if (\n cached.inv_cache[\"inventory\"][target][\"classes\"]\n != saved_inv_cache[\"inventory\"][target][\"classes\"]\n ):\n logger.debug(\"classes hash changed in %s, recompiling\", target)\n targets.append(target)\n elif (\n cached.inv_cache[\"inventory\"][target][\"parameters\"]\n != saved_inv_cache[\"inventory\"][target][\"parameters\"]\n ):\n logger.debug(\"parameters hash changed in %s, recompiling\", target)\n targets.append(target)\n except KeyError:\n # Errors usually occur when saved_inv_cache doesn't contain a new target\n # Recompile anyway to be safe\n targets.append(target)\n\n return targets",
"def remove_obsolete_targets(self, destination_directory):\n \n # Does 'destination_directory' have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.PATH_SCHEMA.check_match(destination_directory)\n\n # Iterate through the rolenames and verify whether the 'previous'\n # directory contains a target no longer found in 'current'.\n for role in tuf.roledb.get_rolenames():\n if role.startswith('targets'):\n if role in self.metadata['previous'] and self.metadata['previous'][role] != None:\n for target in self.metadata['previous'][role]['targets'].keys():\n if target not in self.metadata['current'][role]['targets'].keys():\n # 'target' is only in 'previous', so remove it.\n logger.warn('Removing obsolete file: '+repr(target)+'.')\n # Remove the file if it hasn't been removed already.\n destination = os.path.join(destination_directory, target) \n try:\n os.remove(destination)\n except OSError, e:\n # If 'filename' already removed, just log it.\n if e.errno == errno.ENOENT:\n logger.info('File '+repr(destination)+' was already removed.')\n else:\n logger.error(str(e))\n except Exception, e:\n logger.error(str(e))",
"def getProperties(targets):",
"def getTargets(targets) -> str:\n return\"\"\"## Target\n| |Category| |Task| Estimated Time | Actual Time |\n| - | -: | - | - | - | - |\n{previousTargets}\"\"\".format(previousTargets=targets)",
"def drag_dest_get_target_list(self): # real signature unknown; restored from __doc__\n pass",
"def get_new_targets(self):\n self.active_changes = False # (flag) Once changes are retrieved, we assume that they will be sent to the controller\n return self.settings",
"def diffs(current, target):\r\n \r\n additions = [val for val in target if val not in current]\r\n deletions = [val for val in current if val not in target]\r\n\r\n return additions, deletions",
"def Targets(self):\n return self._targets",
"def invalid_versioned_targets(self):\n return self._invalid_versioned_targets",
"def output_targets(self, input_targets):\n return input_targets",
"def _metadata_update_targets(targets):\n affidavit = _create_affidavit()\n firmwares = db.firmware.get_all()\n for target in targets:\n firmwares_filtered = []\n for f in firmwares:\n if f.target == 'private':\n continue\n if f.target != target:\n continue\n firmwares_filtered.append(f)\n if target == 'stable':\n _generate_metadata_kind('firmware.xml.gz',\n firmwares_filtered,\n affidavit=affidavit)\n elif target == 'testing':\n _generate_metadata_kind('firmware-testing.xml.gz',\n firmwares_filtered,\n affidavit=affidavit)",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PlanTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}",
"def combined_all_versioned_targets(self):\n return self._combined_all_versioned_targets",
"def targets_infos(self) -> Dict[str, MetaFile]:\n raise NotImplementedError",
"def _findChangedFiles(self):\n changedFiles = []\n # calculate and update checksums always for ALL files\n for observedFile in self.observedFiles:\n if os.path.isfile(observedFile.filePath):\n currentChecksum = checksumFile(observedFile.filePath)\n else:\n currentChecksum = None\n # different values with None value checking\n if ((observedFile.lastChecksum is None\n and currentChecksum is not None)\n or observedFile.lastChecksum != currentChecksum):\n changedFiles.append(observedFile) # notify change\n observedFile.lastChecksum = currentChecksum # update checksum\n\n return changedFiles",
"def newer_group(sources, target, missing='error'):\n # If the target doesn't even exist, then it's definitely out-of-date.\n if not os.path.exists(target):\n return True\n\n # Otherwise we have to find out the hard way: if *any* source file\n # is more recent than 'target', then 'target' is out-of-date and\n # we can immediately return true. If we fall through to the end\n # of the loop, then 'target' is up-to-date and we return false.\n target_mtime = os.stat(target).st_mtime\n\n for source in sources:\n if not os.path.exists(source):\n if missing == 'error': # blow up when we stat() the file\n pass\n elif missing == 'ignore': # missing source dropped from\n continue # target's dependency list\n elif missing == 'newer': # missing source means target is\n return True # out-of-date\n\n if os.stat(source).st_mtime > target_mtime:\n return True\n\n return False",
"def BuildFileTargets(target_list, build_file):\n return [p for p in target_list if BuildFile(p) == build_file]",
"def combined_invalid_versioned_targets(self):\n return self._combined_invalid_versioned_targets",
"def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets",
"def drag_source_get_target_list(self): # real signature unknown; restored from __doc__\n pass",
"def drag_dest_info(self):\n return DdTargets.URI_LIST",
"def get_targets() -> Generator[dict, dict, list[TargetInfo]]:\n response = yield {\"method\": \"Target.getTargets\", \"params\": {}}\n return [TargetInfo.from_json(t) for t in response[\"targetInfos\"]]"
] |
[
"0.59195954",
"0.59108806",
"0.5899564",
"0.5894745",
"0.5699647",
"0.5696529",
"0.56604826",
"0.5591113",
"0.55762184",
"0.55114293",
"0.55009407",
"0.54718184",
"0.54223627",
"0.53630525",
"0.5358456",
"0.5324592",
"0.5271276",
"0.52465874",
"0.5198097",
"0.5184535",
"0.51824176",
"0.51690084",
"0.5156294",
"0.5127235",
"0.51181686",
"0.5109532",
"0.50992715",
"0.5086002",
"0.50786805",
"0.5064388"
] |
0.78240633
|
0
|
Download 'target' and verify it is trusted. This will only store the file at 'destination_directory' if the downloaded file matches the description of the file in the trusted metadata.
|
def download_target(self, target, destination_directory):
# Do the arguments have the correct format?
# This check ensures the arguments have the appropriate
# number of objects and object types, and that all dict
# keys are properly named.
# Raise 'tuf.FormatError' if the check fail.
tuf.formats.TARGETFILE_SCHEMA.check_match(target)
tuf.formats.PATH_SCHEMA.check_match(destination_directory)
# Extract the target file information.
target_filepath = target['filepath']
trusted_length = target['fileinfo']['length']
trusted_hashes = target['fileinfo']['hashes']
# get_target_file checks every mirror and returns the first target
# that passes verification.
target_file_object = self.get_target_file(target_filepath, trusted_length,
trusted_hashes)
# We acquired a target file object from a mirror. Move the file into
# place (i.e., locally to 'destination_directory').
destination = os.path.join(destination_directory, target_filepath)
destination = os.path.abspath(destination)
target_dirpath = os.path.dirname(destination)
if target_dirpath:
try:
os.makedirs(target_dirpath)
        except OSError as e:
if e.errno == errno.EEXIST: pass
else: raise
else:
logger.warn(str(target_dirpath)+' does not exist.')
target_file_object.move(destination)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_target(target_info, temp_dir, images_dir, inventory, args):\n target_name = target_info.get(\"target\")\n target_sha256 = target_info.get(\"sha256_hash\")\n filename = target_info.get(\"filename\")\n temp_path = os.path.join(temp_dir, filename)\n # Add a trailing slash to make sure that urljoin handles things properly\n full_url = urljoin(args.base_url+'/', target_info.get(\"url\"))\n _, downloaded_size, downloaded_sha256 = download(\n images_url=full_url,\n filename=temp_path,\n buffer_size=args.buffer_size,\n print_progress=(_LOG_LEVEL <= _LOG_LEVELS.get(\"INFO\", 3))\n )\n if downloaded_size == 0:\n log(\"INFO\", \"Skipping target: {}\".format(target_name))\n return\n log(\"TRACE\", \"{} successfully downloaded ({} Bytes)\"\n .format(temp_path, downloaded_size))\n # If the SHA256 in the manifest has the value '0', this is a special case\n # and we just skip the verification step\n if target_sha256 == '0':\n log(\"DEBUG\", \"Skipping SHA256 check for {}.\".format(full_url))\n # If the check fails, print an error and don't unzip the file\n elif downloaded_sha256 != target_sha256:\n log(\"ERROR\", \"Downloaded SHA256 does not match manifest for {}!\"\n .format(full_url))\n return\n # Note: this skips the --keep option, so we'll never keep image packages\n # that fail the SHA256 checksum\n ## Now copy the contents to the final destination (the images directory)\n delete_from_inv(target_info, inventory, images_dir)\n if os.path.splitext(temp_path)[1].lower() == '.zip':\n archive_namelist = extract(\n temp_path,\n images_dir,\n args.test)\n if args.keep:\n # If the user wants to keep the downloaded archive,\n # save it to the images directory and add it to the inventory\n shutil.copy(temp_path, images_dir)\n archive_namelist.append(filename)\n else:\n archive_namelist = []\n shutil.copy(temp_path, images_dir)\n ## Update inventory\n inventory[target_name] = {\"repo_hash\": target_info.get(\"repo_hash\"),\n \"contents\": archive_namelist,\n \"filename\": filename}",
"def unavoidable_download_method(self, target, name):\n # Get path to file\n file_path = os.path.join(self.work_dir, name)\n\n # Create necessary directories if not present\n self.mkdir_p(self.work_dir)\n\n # Check if file exists, download if not presente\n if not os.path.exists(file_path):\n try:\n subprocess.check_call(['curl', '-fs', self.input_urls[name], '-o', file_path])\n except subprocess.CalledProcessError:\n raise RuntimeError('\\nNecessary file could not be acquired: {}. Check input URL')\n except OSError:\n raise RuntimeError('Failed to find \"curl\". Install via \"apt-get install curl\"')\n\n assert os.path.exists(file_path)\n\n # Update FileStoreID\n target.updateGlobalFile(self.ids[name], file_path)\n\n return file_path",
"def maybe_download(filename, expected_bytes, force=False):\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename",
"def verify_destination(self, destination):\n # Make sure the text file was copied to the destination.\n text_file = os.path.join(destination, 'notes.txt')\n assert os.path.isfile(text_file)\n with open(text_file) as handle:\n assert handle.read() == \"This file should be included in the backup.\\n\"\n # Make sure the subdirectory was copied to the destination.\n subdirectory = os.path.join(destination, 'subdirectory')\n assert os.path.isdir(subdirectory)\n # Make sure the symbolic link was copied to the destination.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n assert os.path.islink(symlink)",
"def check_file_transferred(replica, location):\n\n from tardis.tardis_portal.models import Dataset_File\n datafile = Dataset_File.objects.get(pk=replica.datafile.id)\n\n # If the remote is capable, get it to send us the checksums and / or\n # file length for its copy of the file\n try:\n # Fetch the remote's metadata for the file\n m = location.provider.get_metadata(replica)\n _check_attribute(m, datafile.size, 'length')\n if (_check_attribute(m, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute(m, datafile.md5sum, 'md5sum')):\n return True\n if location.trust_length and \\\n _check_attribute(m, datafile.size, 'length') :\n return False\n raise MigrationError('Not enough metadata for verification')\n except NotImplementedError:\n pass\n except HTTPError as e:\n # Bad request means that the remote didn't recognize the query\n if e.code != 400:\n raise\n\n if location.provider.trust_length :\n try:\n length = location.provider.get_length(replica)\n if _check_attribute2(length, datafile.size, 'length'):\n return False\n except NotImplementedError:\n pass\n\n # Fetch back the remote file and verify it locally.\n f = location.provider.get_opener(replica)()\n md5sum, sha512sum, size, x = generate_file_checksums(f, None)\n _check_attribute2(str(size), datafile.size, 'length')\n if _check_attribute2(sha512sum, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute2(md5sum, datafile.md5sum, 'md5sum'):\n return True\n raise MigrationError('Not enough metadata for file verification')",
"def save(self, url, destination, payload={}, overwrite=False):\n head_args = self._fmt_request_args(\"GET\", self.headers, url, payload)\n head_args.pop(\"method\")\n head_args[\"verify\"] = False\n h = requests.head(**head_args)\n header = h.headers\n content_type = header.get(\"content-type\")\n\n # Figure out the local file name and check if it's available.\n local_phile_name = self._determine_save_file_name(url, content_type, destination)\n if os.path.exists(local_phile_name) and not overwrite:\n logging.error(\"File %s already exists, use carpetbag.save(overwrite=True) to overwrite.\" % local_phile_name)\n raise errors.CannotOverwriteFile\n\n # Check content length\n content_length = header.get(\"content-length\", None)\n if content_length.isdigit():\n content_length = int(content_length)\n if content_length > self.max_content_length:\n logging.warning(\"Remote content-length: %s is greater then current max: %s\")\n return False\n\n # Get the file.\n response = self.get(url, payload=payload)\n\n open(local_phile_name, \"wb\").write(response.content)\n\n return local_phile_name",
"def maybe_download(filename, expected_bytes):\n filepath = datapath + filename\n if not os.path.exists(filepath):\n # urlretrieve returns a tuple of saved filepath and info() of the downloaded file\n filepath, _ = urllib.request.urlretrieve(url+filename, filepath)\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filepath)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filepath + '. Can you get to it with a browser?')\n return filepath",
"def verifyFile(source, destination):\n\tsourceHash = hashlib.sha256(open(source, 'rb').read()).digest()\n\tdestinationHash = hashlib.sha256(open(destination, 'rb').read()).digest()\n\n\tif sourceHash == destinationHash:\n\t\treturn (True, str(sourceHash))\n\n\treturn False",
"def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)",
"def copy_to_cache(cls, target_filename):\n is_cached = cls.is_remote_cached(target_filename)\n if not is_cached:\n cache = cls.CACHE_BACKEND()\n cache.upload(target_filename)\n logger.debug('File %r was uploaded to %r', target_filename, cls.CACHE_BACKEND)",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print ('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename",
"def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True",
"def maybe_download(filename):\n\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.Size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath",
"def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath",
"def download(self,\n targetdir: Union[str, Path],\n overwrite: bool = False,\n verbose: bool = False):\n targetname = Path(targetdir, self.filename)\n \n # Check if targetname exists\n if overwrite or not targetname.exists():\n \n # Get the URL\n r = requests.get(self.url)\n \n # Print message if URL does not exist\n if r.status_code == 404:\n print(f'File URL not found: {self.url}')\n return False\n \n else:\n # Raise any other request errors\n r.raise_for_status()\n\n # Save downloaded content\n with open(targetname, 'wb') as f:\n f.write(r.content)\n if verbose:\n print(f'{self.filename} downloaded to {targetdir}')\n return True\n else:\n # Skip files that already exist\n if verbose:\n print(f'{self.filename} already in {targetdir}')\n return False",
"def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def download_build(self, name, dst_directory):\n logging.info('Not downloading build because no Filestore.')",
"def validate_target(new_data_folder, target_name, proposal_ref):\n # Don't need\n del proposal_ref\n\n validate_dict = {'Location': [], 'Error': [], 'Line number': []}\n\n # Check if there is any data to process\n target_path = os.path.join(new_data_folder, target_name)\n\n # Assume success...\n validated = True\n\n # A target directory must exist\n if not os.path.isdir(target_path):\n validate_dict = add_tset_warning(validate_dict, 'Folder',\n 'Folder does not match target name.'\n f' Expected \"{target_name}\".'\n f' Is the upload called \"{target_name}.zip\"?', 0)\n # No point in checking anything else if this check fails\n validated = False\n\n if validated:\n # An 'aligned' directory must exist\n aligned_path = os.path.join(target_path, 'aligned')\n if not os.path.isdir(aligned_path):\n validate_dict = add_tset_warning(validate_dict, 'Folder',\n 'No aligned folder present.'\n f' Expected \"{target_name}/{aligned_path}\"', 0)\n # No point in checking anything else if this check fails\n ok_so_far = False\n\n if validated:\n # A metadata.csv file must exist\n metadata_file = os.path.join(aligned_path, 'metadata.csv')\n if os.path.isfile(metadata_file):\n validated, validate_dict = check_metadata(metadata_file, validate_dict)\n else:\n validate_dict = add_tset_warning(validate_dict, 'File',\n 'No metedata file present.'\n f' Expected \"{target_name}/{aligned_path}/{metadata_file}\"', 0)\n validated = False\n\n return validated, validate_dict",
"def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename",
"def download_corpus(self, name, dst_directory):\n logging.info('Not downloading corpus because no Filestore.')",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename",
"def maybe_download(filename, work_directory, source_url):\n\tif not gfile.Exists(work_directory):\n\t\tgfile.MakeDirs(work_directory)\n\tfilepath = os.path.join(work_directory, filename)\n\tif not gfile.Exists(filepath):\n\t\ttemp_file_name, _ = urlretrieve_with_retry(source_url)\n\t\tgfile.Copy(temp_file_name, filepath)\n\t\twith gfile.GFile(filepath) as f:\n\t\t\tsize = f.size()\n\t\tprint('Successfully downloaded', filename, size, 'bytes.')\n\treturn filepath",
"def download(self, dest, overwrite=False):\n dest = os.path.abspath(dest)\n try:\n local = get_local(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n local = None\n else: # Something exists here.\n if local.hash() == self.hash: # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.uri, local.path))\n return\n if not overwrite:\n raise ValueError(\"%s already exists\" % local.path)\n\n # To avoid any weird overwriting behaviour in the case of errors, we'll\n # download to a different location first, then move to dest afterwards.\n tmp_dest = os.path.join(\n pdbox.TMP_DOWNLOAD_DIR,\n os.path.basename(dest),\n )\n while os.path.exists(tmp_dest): # Make sure the temp name is unique.\n tmp_dest += \"_\"\n\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Downloaded %s to %s\" % (self.uri, dest))\n return None\n\n # TODO: Progress bars.\n meta = execute(pdbox.dbx.files_download_to_file, tmp_dest, self.path)\n pdbox.debug(\"Metadata response: %s\" % meta)\n\n if not os.path.isdir(os.path.dirname(dest)):\n # Create the parent directories of dest.\n os.makedirs(os.path.dirname(dest))\n\n if not pdbox._args.get(\"dryrun\"):\n # os.rename overwrites files just fine, but not directories.\n if local and isinstance(local, LocalFolder):\n shutil.rmtree(local.path)\n # Move the file from the temp location to dest.\n os.rename(tmp_dest, dest)\n\n pdbox.info(\"Downloaded %s to %s\" % (self.uri, dest))\n return LocalFile(dest) # Return the newly created file.",
"def download_site_and_store_its_content(self, site_url, bucket_name, target_directory):\n logger.info(f'Trying to download site content from url: {site_url}')\n try:\n content = self.web_client.get(site_url)\n\n date_string = datetime.now().strftime('%Y/%m/%d/%H/%M')\n file_key = f'{target_directory}/{date_string}/site.html'\n\n self.storage_client.upload(\n bucket_name=bucket_name,\n file_key=file_key,\n file_content=content)\n\n except Exception as e:\n logger.error(f'Something went wrong: {e}')\n return False, str(e)\n\n return True, None",
"def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename"
] |
[
"0.59671676",
"0.5967019",
"0.5953204",
"0.5891534",
"0.5793983",
"0.5768841",
"0.5755365",
"0.574282",
"0.56129616",
"0.5590517",
"0.5573866",
"0.5563315",
"0.55612004",
"0.55537355",
"0.55537355",
"0.55136275",
"0.55136275",
"0.55097127",
"0.5508454",
"0.5508454",
"0.5508454",
"0.55040145",
"0.54929715",
"0.5474552",
"0.547399",
"0.54724807",
"0.54535455",
"0.54481477",
"0.54453945",
"0.5417494"
] |
0.7110834
|
0
|
Make a player try to leave jail.
|
def try_leave_jail(self):
player = self.player_list[self.current_player]
print(player, "trying to leave jail")
if player.strategy.get_out_of_jail_with_card(self, player=player):
# we just leave jail using the card.
card = player.get_card(GetOutJailFree)
if len(self.chance_cards) > len(self.community_cards):
self.receive_community_card(card)
else:
self.receive_chance_card(card)
player.leave_jail()
return True
if player.strategy.get_out_of_jail_with_cash(self, player):
self.transaction_to_player(Bank(), -50, player)
        # now roll the dice, and move
thr = Throw()
new_position = self.compute_new_position_from_dice(self.current_player, thr)
self.move_player_to(self.current_player, new_position, thr=thr)
player.leave_jail()
return True
# just try to roll the dice
thr = Throw()
if thr.is_double():
new_position = self.compute_new_position_from_dice(self.current_player, thr)
self.move_player_to(self.current_player, new_position, thr=thr)
player.leave_jail()
return True
player.count_failed_leave_fail()
if player.count_failed_attempts_fail() > 3:
# just play and leave
self.transaction_to_player(Bank(), -50, player)
            # now roll the dice, and move
thr = Throw()
new_position = self.compute_new_position_from_dice(self.current_player, thr)
self.move_player_to(self.current_player, new_position, thr=thr)
player.leave_jail()
return True
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None",
"def leaveGame(game, player): # is also called in register player if THE UNPROBABLE happens (e.g. there was a crash and bobby can't come in again)\n\t#check if player is in game and game exists, if the player is the creator close the game\n\tgame_key = game.key()\n\tplayer_key = player.key()\n\n\tif game != None and player != None:\t\t\t\n\t\tif game.creator.key() == player.key():\n\t\t\t#TODO: close game\n\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.status = 2\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Creator %s left game %s, game stopped'%(player_key,game_key))\n\t\t\tvalue = \"done\"\n\t\telif player.key() in game.players:\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Player %s left game %s, game has now %s players left'%(player_key,game_key,game.playerCount))\n\n\t\t\t#TODO: deal with the horrible aftermath\n\t\t\t#maybe if only 2 left start showdown, give 2 minutes then set marker in between them\n\t\t\tvalue = \"done\"\n\t\telse:\n\t\t\tlogging.error('Attempt to leave game %s by player %s failed, not in list apparently and not creator'%(game_key,player_key))\t\t\t\n\t\t\tvalue = \"error\"\t\t\n\telse:\n\t\tlogging.error('Attempt to leave game %s by player %s failed, no game or player'%(game_key,player_key))\t\t\t\n\t\tvalue = \"error\"\n\n\treturn value",
"def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)",
"def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()",
"def endgame(winner):",
"async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))",
"def _decrease_lives(self, player):\n player.lives -= 1\n if player.lives:\n self.dead_player = True\n player.is_alive = False\n else:\n self.game_over = True",
"def leave_game(players_cursor, states_cursor, user, room_id):\n leave_query = '''DELETE FROM players_table WHERE user = ? AND room_id = ?'''\n players_cursor.execute(leave_query, (user, room_id))\n FRAMES.append(display_game(players_cursor, states_cursor, user, room_id))",
"def endGame(self):\n pass",
"def leave(self):\n self.game.leave(self)\n return self.game",
"def leave(self):\n self.game.dealer_leave(self)\n return self.game",
"def leave(self):\n self.pleaseQuit=1",
"def ending(self, board, width, height):\n game = LocalGame((LocalPlayer(), LocalPlayer()))\n game.logic = Logic(width=width, height=height, std_board=False)\n game.logic.board = self.board_generator(board, width, height)\n game.current_player = game.first_player\n return game",
"def stand(self):\n self.endgame()",
"async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)",
"def resign_game(self):\n if self._current_player == \"BLACK\":\n self._game_state = \"WHITE_WON\"\n\n else:\n self._game_state = \"BLACK_WON\"",
"def stop(self):\n self.set_state_null()\n self.player = None",
"def end_of_game(self, winner):\n pass",
"def resign_game(self):\n # If entered, it will return winner status for the opposing player\n if self._current_player == 'W':\n self._game_status = 'BLACK_WON'\n if self._current_player == 'B':\n self._game_status = 'WHITE_WON'",
"def end_game(self):\n print(str(self.__players[0]._Player__name) + \" score is: \"\n + str(self.__fields[0].score))\n print(str(self.__players[1]._Player__name) + \" score is: \"\n + str(self.__fields[1].score))\n Game.play = False",
"def leave_game(game_id, player_id):\n game_data = load_state(game_id)\n if not game_data:\n return False\n players = game_data.get('players')\n if player_id not in [p['id'] for p in players]:\n return False\n if game_data['ended_at']:\n return False\n quitter = [p for p in players if p['id'] == player_id][0]\n msg = make_info_message('You have left the game')\n alt_msg = make_info_message('{} has left the game'.format(quitter['name']))\n flash_player(game_data, quitter, msg, alt_msg)\n if quitter['active']:\n activate_next_player(game_data, player_quit=True)\n game_data['players'].remove(quitter)\n # If the quitter was the admin and there is at least one player\n # remaining, reassign the admin role to the first position.\n new_admin = None\n if quitter['admin'] and game_data['players']:\n new_admin = game_data['players'][0]\n new_admin['admin'] = True\n # If one player remaining in an active game, end the game now.\n if game_data['active'] and len(game_data['players']) == 1:\n game_data['active'] = False\n game_data['players'][0]['active'] = False\n game_data['ended_at'] = serialize_datetime(datetime.utcnow())\n msg = make_info_message('The game has ended')\n flash_broadcast(game_data, msg)\n if new_admin and not (game_data['started_at'] or game_data['ended_at']):\n msg = make_info_message('You are now the game administrator')\n flash_player(game_data, new_admin, msg)\n # If no players remaining, end the game now.\n if not game_data['players']:\n game_data['ended_at'] = serialize_datetime(datetime.utcnow())\n # If game is still active at this point, reclaim the quitter's cards.\n if game_data['active']:\n reclaim_player_cards(game_data, quitter)\n\n save_state(game_data)\n return True",
"async def leave_room(self, label):\n user = self.user\n room = await self.get_room(label)\n\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.leave',\n 'label': label,\n 'username': user.username,\n 'title': room.name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(label)\n\n # Remove client from the group so he no longer get room messages\n await self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n await self.send_json(\n return_value(\n ACTION_LEAVE, room.label, TO_ME, MSG_LEAVE, NO_MESSAGE\n )\n )",
"def game_lose(self):\n self.lose = True\n self.player.reset_animations()\n self.player.reset_actions()\n self.msg.set_text(u'YOU LOSE <Press Space>')\n self.msg.show(True)",
"def leave(self):\n p = GameOverPopup(self)\n p.open()",
"def end_game(self):\n self.game.stop_running()",
"def test_resume_game(self):\r\n\r\n a_player_1 = RandomPlayer(1)\r\n a_player_2 = UserPlayer(2)\r\n a_player_2.set_choice(0)\r\n a_players = [a_player_1, a_player_2]\r\n a_x_dist = 5\r\n a_y_dist = 5\r\n a_num_to_win = 3\r\n a_game = Game(a_players,a_x_dist,a_y_dist,a_num_to_win)\r\n\r\n #game will pause\r\n a_game.play_game()\r\n\r\n while a_game.winner != -1:\r\n a_player_2.set_choice(0)\r\n a_game.resume_game()",
"def end_turn(self):\n if self.current_player_piece == Piece.PLAYER1:\n self.current_player_piece = Piece.PLAYER2\n elif self.current_player_piece == Piece.PLAYER2:\n self.current_player_piece = Piece.PLAYER1\n else:\n raise RuntimeError('Invalid current player piece')",
"def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)",
"def leave_win_game(self):\n self.end = True\n self.canevas.config(bg='black')\n self.canevas.itemconfig(self.ball.ball, fill='black')\n self.canevas.itemconfig(self.paddle.paddle, fill='black')\n self.canevas.update()\n time.sleep(2)\n self.canevas.config(bg='light blue')\n self.canevas.itemconfig(self.ball.ball, fill='red')\n self.canevas.itemconfig(self.paddle.paddle, fill='grey')\n self.brick.next_level()",
"def lose_life(self):\n self.lives -= 1\n self.alive = self.calculate_alive()"
] |
[
"0.6754132",
"0.63794553",
"0.6148522",
"0.6108706",
"0.6084471",
"0.5965771",
"0.5953274",
"0.58382595",
"0.58152115",
"0.57858145",
"0.57740587",
"0.5755289",
"0.5752021",
"0.5657248",
"0.565241",
"0.56184435",
"0.56099445",
"0.5607249",
"0.5576661",
"0.5569093",
"0.55679363",
"0.55666727",
"0.5555029",
"0.55225813",
"0.5517249",
"0.55140996",
"0.55057204",
"0.5486557",
"0.5472744",
"0.5468197"
] |
0.7402688
|
0
|
Return the index for the player.
|
def get_player_index(self, player):
# find the player by going through the list
for i in range(len(self.player_list)):
if player == self.player_list[i]:
return i
raise PlayerNotFound
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_player_index(self, id_) -> int:\n return self._players_list.index(self._nodes[id_]['player'])",
"def get_player_num(self):\r\n return self.player_control.get_player_num()",
"def get_current_player(self):\r\n\r\n return self.players[(self.turn_number) % len(self.players)].get_id()",
"def index(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"index\")",
"def index(self) -> int:\r\n return self._index",
"def index(self) -> int:",
"def idx(self):\n return int(self.__ph.get('idx', 0))",
"def index(self) -> int:\n return self._index",
"def index(self) -> int:\n return self._index",
"def index(self) -> int:\n return self._index",
"def index(self) -> int:\n return self._index",
"def index(self) -> int:\n return self._index",
"def getPlayer(self, idx):\n return self.players[idx]",
"def get_next_player(self, player):\r\n return player * -1",
"def get_next_player(self, player):\r\n return player * -1",
"def get_player_id(self, player):\n url = self.base_url + \"/player/player/findplayer.html\"\n params = {\"q\": player, \"start\": 0, \"count\": \"Infinity\"}\n url += \"?\" + urllib.parse.urlencode(params)\n resp = self.fetch(url)\n resp_json = json.loads(resp)\n if len(resp_json[\"items\"]) == 0:\n return -1\n return resp_json[\"items\"][0][\"id\"]",
"def i_to_player_id(self, i):\n game = self.ctrl.game\n if self.hot_seat:\n return game.current_player if i == 0 else (1 - game.current_player)\n else:\n return self.main_player_id if i == 0 else (1 - self.main_player_id)",
"def get_player(self):\n return 2 - int((np.sum(self.state) % 2))",
"def player(self, state, current):\n return (current + 1) % state.getNumAgents()",
"def getPidx(self):\n return int(bytes(self.keeper.getGbl(b\"pidx\")), 16)",
"def idx(self):\n return self._idx",
"def name_to_pindex(session, name):\n for i, player in enumerate(session.players):\n if name == player.name:\n return i\n return 'Not found.'",
"def get_current_player(self):\n return self.in_game_players[self.curr_player_index]",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def getPlayer(self, index):\n if type(index) is not int:\n raise TypeError(\"The index passed to getPlayer must be of type int.\")\n elif index < 0 or index >= len(self.__colordict__.keys()):\n raise IndexError(\"Index less than 0 or greater than or equal to the number of \" +\n \"players in the game.\")\n node_i = self.__currentnode__\n for i in range(index):\n node_i = node_i.getNext()\n return node_i.getPlayer()",
"def _index(self) -> int:\n return -1",
"def find_player(self):\n for y, line in enumerate(self.maze):\n for x, character in enumerate(line):\n if character == \"m\":\n return y, x\n return None",
"def __get_current_player(self):\n return engine.Engine.game_manager.players[self.current_player_index]",
"def get_index(self):\n return self.index",
"def get_index(self):\n return self.index"
] |
[
"0.78155226",
"0.72656244",
"0.7010457",
"0.7005153",
"0.69597197",
"0.69525254",
"0.6930846",
"0.69154924",
"0.69154924",
"0.69154924",
"0.69154924",
"0.69154924",
"0.69094145",
"0.684157",
"0.684157",
"0.6797731",
"0.67746806",
"0.6772885",
"0.6744422",
"0.67356735",
"0.66867477",
"0.6660931",
"0.6656831",
"0.66453075",
"0.6597261",
"0.65709245",
"0.65496266",
"0.65173376",
"0.65061593",
"0.65061593"
] |
0.80226827
|
0
|
Return the index of the square.
|
def get_square_index(self, square):
        # find the square by going through the list
for i in range(len(self.squares)):
if square == self.squares[i]:
return i
raise SquareNotFound
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_square_index(self, cell):\n return next(s for s, square in enumerate(self.squares) if cell in square)",
"def getIndex(x, y, rows, cols):\n x = cols-x-1\n if x % 2 != 0:\n return (x*rows)+y\n else:\n return (x*rows)+(rows-1-y)",
"def get_square(self, index: int):\n return self.squares[index]",
"def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0",
"def get_index(self):\n return (np.sqrt(self.dielectric))",
"def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index",
"def get_index(self, row, col):\n return (row * self.cols) + col",
"def index_to_square(self, this_index: int) -> Square:\n return self.squares[this_index]",
"def square(i, j):\n return map(sq_start, [i, j, i + 1, j + 1])",
"def get_square_index_by_name(self, square_name, from_square=None):\r\n if from_square is not None:\r\n # don't start at the begining\r\n raise Exception\r\n\r\n for i in range(len(self.squares)):\r\n print(self.squares[i].name, square_name)\r\n if self.squares[i].name == square_name:\r\n return i\r\n\r\n raise SquareNotFound",
"def find_blank_square(self, state):\n\n return state.index(0)",
"def loc_2_idx(loc, board_size):\n return loc[0] * board_size + loc[1]",
"def get_index(self, point, cell_size):\n return (point[1] / cell_size, point[0] / cell_size)",
"def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)",
"def to_index(self):\r\n return (BOARD_HEIGHT - 1 - self.y) * BOARD_HEIGHT + (BOARD_WIDTH - 1 - self.x)",
"def index(self, x) -> int:\n pass",
"def one_dim_index(self, i, j):\n return int(i + j * self.nx)",
"def getIndex(mos, x, y): # should be a method of lsst.afw.display.utils.Mosaic\n\n ix = int(x + 0.5)//(mos.xsize + mos.gutter)\n iy = int(y + 0.5)//(mos.ysize + mos.gutter)\n\n return ix + iy*mos.nx",
"def square(self, row, col):\n return self.board[row][col]",
"def find_square(self, target_name: str) -> int:\n found_square_num = None\n for i in range(len(self.squares)):\n if target_name == self.squares[i].name:\n found_square_num = i\n break\n return found_square_num",
"def get_square(self, row, col):\n\n return self.board[row][col]",
"def __pos(self, i, j):\n return i * (i - 1) / 2 + j",
"def index(self) -> int:",
"def getSquare(x, y):\n\n\tglobal theMap, width, height\n\n\treturn theMap[x + y * width]",
"def square(self):\n return self.x * self.x + self.y * self.y",
"def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx",
"def get_pos_index(self):\n return [self.row-1, self.col-1]",
"def sq_start(i):\n return i * pixel_width / n",
"def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 0), self.m-1)\n return [i, j]",
"def get_piece(self, index):\n return self.squares[index]"
] |
[
"0.8003269",
"0.7540733",
"0.71379787",
"0.71144485",
"0.71032023",
"0.7096975",
"0.7057881",
"0.6839868",
"0.67621505",
"0.675864",
"0.66842747",
"0.6590778",
"0.6580608",
"0.6566204",
"0.6562753",
"0.64829534",
"0.64553094",
"0.64529425",
"0.6446773",
"0.6426442",
"0.64216167",
"0.6419109",
"0.64141434",
"0.64128",
"0.63883597",
"0.63575196",
"0.63430375",
"0.63422394",
"0.6340719",
"0.6340574"
] |
0.7821449
|
1
|
Return the first square of a given name.
|
def get_square_index_by_name(self, square_name, from_square=None):
if from_square is not None:
            # don't start at the beginning
raise Exception
for i in range(len(self.squares)):
print(self.squares[i].name, square_name)
if self.squares[i].name == square_name:
return i
raise SquareNotFound
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def singularize(name):\n n = str(name)\n if n[-1:] == 's':\n return n[:-1]\n return n",
"def find_square(self, target_name: str) -> int:\n found_square_num = None\n for i in range(len(self.squares)):\n if target_name == self.squares[i].name:\n found_square_num = i\n break\n return found_square_num",
"def xd_element(name):\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name",
"def first(word):\n return word[0]",
"def first(word):\n\treturn word[0]",
"def index_to_square_name(self, this_index: int) -> str:\n return self.index_to_square(this_index).name",
"def get_shape(self, name):\n\n if name == \"circle\":\n return Circle(random.randint(1, 10))\n\n elif name == \"square\":\n return Square(random.randint(1, 10))\n\n elif name == \"rectangle\":\n return Rectangle(random.randint(1, 10), random.randint(1, 10))",
"def size_from_name(size, sizes):\n\n by_name = [s for s in sizes if s.name == size]\n if len(by_name) > 1:\n raise Exception('more than one image named %s exists' % size)\n return by_name[0]",
"def get_square(self, col, row) -> str:\n row_index = row - 1\n col_index = ord(col.lower()) - 97 # ord('a') is 97\n return self.state[row_index][col_index]",
"def square_name(row_or_tuple, col=None):\n\n if col is None:\n # Then row_or_tuple should be a tuple, extract row and col from there\n try:\n row = row_or_tuple[0]\n col = row_or_tuple[1]\n except TypeError:\n raise ValueError(\"Row and column are both required\")\n else:\n # row_or_tuple is the row\n row = row_or_tuple\n\n if 0 == row:\n if 0 == col:\n return \"top left\"\n elif 1 == col:\n return \"top centre\"\n elif 2 == col:\n return \"top right\"\n elif 1 == row:\n if 0 == col:\n return \"middle left\"\n elif 1 == col:\n return \"middle centre\"\n elif 2 == col:\n return \"middle right\"\n elif 2 == row:\n if 0 == col:\n return \"bottom left\"\n elif 1 == col:\n return \"bottom centre\"\n elif 2 == col:\n return \"bottom right\"\n raise TypeError(\n \"No such (row, column) pair: each must be in range 0-2 inclusive\")",
"def shortest_id(name, names):\n if name in names:\n names.remove(name)\n\n for i, letter in enumerate(name):\n for other_name in names:\n if other_name[i:i+1] == letter:\n break\n else:\n break\n\n return name[0:i+1]",
"def first(self, name: str) -> etree.Element:\n return self.from_name(name)[0]",
"def find_shape(name):\n for path in shapes():\n shape_name = os.path.basename(path).replace('.json', '')\n\n if shape_name == name:\n return path\n\n return None",
"def getSquare(x, y):\n\n\tglobal theMap, width, height\n\n\treturn theMap[x + y * width]",
"def get_school(self, name: str) -> School:\n\n schools = self.get_schools()\n f = lambda school, name: school.name == name\n\n idx = [idx for idx, school in enumerate(schools) if f(school, name)]\n\n return schools[idx[0]] if idx else None",
"def get_member(self, name):\n members = self.wls_board.get_members()\n for member in members:\n if name in member.full_name:\n return member\n return 'None'",
"def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n first_name = [first for first, last in split_names]\n shortest = first_name[0]\n for name in first_name:\n if len(name) < len(shortest):\n shortest = name\n\n return shortest",
"def get_square(self, index: int):\n return self.squares[index]",
"def find_blank_square(self, state):\n\n return state.index(0)",
"def get_center(name, shape):\n x,y, (w,h) = shape\n return Point(x + w/2, y + h/2, name=name)",
"def get_square(self, x, y):\n if x < 0 or x > self.width-1 or y < 0 or y > self.height-1:\n return MapSquare(x, y, Tile.Wall, '~') # return a wall if at end of map\n return self.mapArray[y][x]",
"def simple(self, name: str) -> str:\n return content(self.first(name))",
"def _squish_name(self, name, space):\n if len(name) <= space:\n return name\n if space < 3:\n raise ValueError(\"too much squishing!\")\n return name[:space - 2] + \"~\" + name[-1]",
"def test_from_smiles_name(self):\n mol = Molecule.from_smiles(\"C\")\n assert mol.name == \"\"\n\n mol = Molecule.from_smiles(\"C\", name=\"bob\")\n assert mol.name == \"bob\"",
"def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n names_splitted = [name.split() for name in names]\n names_splitted.sort(key=sort_length)\n names_sorted = [\" \".join(name) for name in names_splitted]\n return names_sorted[0]",
"def first_name(seed):\n if consistent_hash(seed, 1):\n first = femaleNames\n else:\n first = maleNames\n return first[consistent_hash(seed, len(first))]",
"def male_middle_name():\r\n\r\n return male_first()",
"def square(num1):\n squared = num1*num1\n return squared",
"def first_half(s):\n if len(s) % 2 == 0:\n num = len(s) / 2\n num = int(num)\n first = s[0:num]\n return first\n elif len(s) % 2 != 0:\n s_str = s[:-1]\n num = len(s_str) / 2\n num = int(num)\n sec = s_str[0:num]\n return sec",
"def square_handler(string):\n\n a = int(input(string[0]))\n\n square = Shape.Square(a)\n\n print(string[1] + square.display() + \" is \" + str(square.getarea()))"
] |
[
"0.62641704",
"0.6074091",
"0.58869636",
"0.5863246",
"0.5843857",
"0.57271814",
"0.5718565",
"0.56371987",
"0.5565492",
"0.5564565",
"0.549277",
"0.5450085",
"0.5432684",
"0.5428434",
"0.54156905",
"0.5396309",
"0.5377145",
"0.5367631",
"0.5342714",
"0.53405",
"0.53291464",
"0.53099215",
"0.5299159",
"0.5276358",
"0.525694",
"0.52524453",
"0.52461755",
"0.5243724",
"0.5233878",
"0.5232115"
] |
0.6145324
|
1
|
Return the first square of a given class.
|
def get_square_by_class(self, square_class, from_square=None):
start_index = 0
if from_square is not None:
            # don't start at the beginning
for i in range(0, len(self.squares)):
if self.squares[i] == from_square:
start_index = i
break
        # scan every square at most once, wrapping around the board,
        # so a missing square class raises instead of looping forever
        for offset in range(len(self.squares)):
            index = (start_index + offset) % len(self.squares)
            if issubclass(self.squares[index].__class__, square_class):
                return self.squares[index]
        raise SquareNotFound
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_closest(self, cls):\n closest = None\n shortest_dist = None\n for sprite in self.game.entities[ALL_SPRITES]:\n if isinstance(sprite, cls):\n curr_dist = distance((self.x, self.y), (sprite.x, sprite.y))\n if shortest_dist is None or curr_dist < shortest_dist:\n closest = sprite\n shortest_dist = curr_dist\n return closest",
"def test_square_class(self):\n s1 = Square(10)\n self.assertEqual(10, s1.size)\n\n s2 = Square(10, 2)\n self.assertEqual(10, s2.size)\n self.assertEqual(2, s2.x)\n\n s3 = Square(3, 5, 2)\n self.assertEqual(3, s3.size)\n self.assertEqual(5, s3.x)\n self.assertEqual(2, s3.y)\n\n s4 = Square(10, 2, 0, 12)\n self.assertEqual(10, s4.size)\n self.assertEqual(12, s4.id)\n self.assertEqual(2, s4.x)\n self.assertEqual(0, s4.y)",
"def get_skill_class(cursor, _class):\n cursor.execute('SELECT id FROM classes WHERE temp_id = ?', (_class,))\n data = cursor.fetchone()\n try:\n return data[0]\n except TypeError:\n l.error(\"The Class {} doesn't exists.\".format(_class))",
"def test_get_squarerect_sqr_all_int(self):\n result = get_squarerectangle_type(2, 2, 2, 2,)\n self.assertEqual(result, 'square')",
"def get_square(self, index: int):\n return self.squares[index]",
"def test_returns_class(self):\n assert type is simple_class().__class__",
"def test_get_sqrrect_sqr_all_float(self):\n result = get_squarerectangle_type(2.22, 2.22, 2.22, 2.22,)\n self.assertEqual(result, 'square')",
"def get_square(self, row, col):\n\n return self.board[row][col]",
"def get_square(self, x, y):\n if x < 0 or x > self.width-1 or y < 0 or y > self.height-1:\n return MapSquare(x, y, Tile.Wall, '~') # return a wall if at end of map\n return self.mapArray[y][x]",
"def getSameClass(_class, classes, data):\n _class = int(_class)\n assert (isinstance(_class, int))\n assert (classes.shape[1] == 1)\n assert (data.shape[1] == 5)\n\n mask = classes == _class\n return data[mask.squeeze()]",
"def safe_get(cls: Type[T]) -> T:\n global _metrics_singularities\n\n if cls not in _metrics_singularities:\n _metrics_singularities[cls] = cls()\n\n return _metrics_singularities[cls]",
"def getSn(classObj):\r\n temp = []\r\n noOfColl = len(classObj.dataSet[0])\r\n mean = classObj.meanOf()\r\n\t#print(mean)\r\n\t#print(classObj.dataSet[:,2])\r\n\r\n for i in range(noOfColl):\r\n\r\n noOfElems = classObj.noOfElem(i)\r\n\t\t\t\r\n squareSum = classObj.diffSquaredSum(classObj.dataSet[:, i], mean[i])\r\n sn = np.sqrt(squareSum / (noOfElems - 1))\r\n temp.append(sn)\r\n\r\n return temp",
"def getone(self, Cl):\n for object in self.ginfo.sprites():\n if isinstance(object, Cl):\n return object\n else:\n return None",
"def index_to_square(self, this_index: int) -> Square:\n return self.squares[this_index]",
"def playable_square(self, col) -> Square:\n for row in reversed(range(len(self.state[0]))):\n square = Square(row, col)\n if self.is_empty(square):\n return square",
"def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)",
"def square(num1):\n squared = num1*num1\n return squared",
"def _resolve_class_min(self, class_min):\n if isinstance(class_min, int) or isinstance(class_min, float):\n return class_min\n raise TypeError('class_min has to be either non-negative int or float')",
"def my_square(y):\n\treturn (y **2)",
"def square(self, row, col):\n return self.board[row][col]",
"def find_general_class(self, class_id):\n for class_ in my_classes:\n if class_.class_id == class_id:\n return class_\n\n return None",
"def get_checker(self, square):\n\n logger.debug(u'get_checker({})'.format(square))\n\n row, column = square\n if row < 0 or row > 7 or column < 0 or column > 7:\n return None\n else:\n return self.squares[row][column]",
"def find_blank_square(self, state):\n\n return state.index(0)",
"def squares(cls) -> FieldArray:\n x = cls.elements\n is_square = x.is_square()\n return x[is_square] # pylint: disable=unsubscriptable-object",
"def getSquare(x, y):\n\n\tglobal theMap, width, height\n\n\treturn theMap[x + y * width]",
"def my_square(y):\n\treturn (y ** 2)",
"def my_square(x):\n return x ** 2",
"def test_get_sqrrect_rect_all_int(self):\n result = get_squarerectangle_type(1, 2, 1, 2)\n self.assertEqual(result, 'rectangle')",
"def get_square_box(box):\r\n left_x = box[0]\r\n top_y = box[1]\r\n right_x = box[2]\r\n bottom_y = box[3]\r\n\r\n box_width = right_x - left_x\r\n box_height = bottom_y - top_y\r\n\r\n # Check if box is already a square. If not, make it a square.\r\n diff = box_height - box_width\r\n delta = int(abs(diff) / 2)\r\n\r\n if diff == 0: # Already a square.\r\n return box\r\n elif diff > 0: # Height > width, a slim box.\r\n left_x -= delta\r\n right_x += delta\r\n if diff % 2 == 1:\r\n right_x += 1\r\n else: # Width > height, a short box.\r\n top_y -= delta\r\n bottom_y += delta\r\n if diff % 2 == 1:\r\n bottom_y += 1\r\n\r\n # Make sure box is always square.\r\n assert ((right_x - left_x) == (bottom_y - top_y)), 'Box is not square.'\r\n\r\n return [left_x, top_y, right_x, bottom_y]",
"def first(self):\n return self._reduce_for_stat_function(F.first, only_numeric=False)"
] |
[
"0.59225607",
"0.5723245",
"0.55512625",
"0.5535083",
"0.5414248",
"0.53844106",
"0.5375716",
"0.53355664",
"0.5303055",
"0.52700686",
"0.52453876",
"0.5232349",
"0.519124",
"0.5178258",
"0.51506567",
"0.5148435",
"0.5138552",
"0.51305276",
"0.5073375",
"0.50638586",
"0.5049378",
"0.5044289",
"0.5040156",
"0.5036123",
"0.501963",
"0.5006087",
"0.49918768",
"0.4983721",
"0.49798426",
"0.49760893"
] |
0.71759033
|
0
|
Move a player n squares.
|
def move_player_n_square(
self, player_index, n, player=None, pass_on_squares=True, thr=None
):
if player is not None:
player_index = self.get_player_index(player)
square_index = self.player_positions[player_index] + n
if square_index < 0:
square_index = -square_index
if square_index >= len(self.squares):
square_index -= len(self.squares)
self.move_player_to(
player_index, square_index, pass_on_squares=pass_on_squares, thr=thr
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def move(self, row, col, player):",
"async def make_move(index):\n if enough_players():\n GAME.make_move(index)\n await update_players()",
"def makeMove(self, move, player):",
"async def move_player(self, player : Player, channel):\r\n await player.move_to(channel)",
"def __play_move(self, x, y):\n\t\tself.board[y][x] = self.current_player\n\t\tself.current_player = self.__negated_player(self.current_player)",
"def make_move(self, index):\n if self.board[index] is None and self.get_winner() is None:\n self.board[index] = self.player\n self.player = 'O' if self.player == 'X' else 'X'\n self.winner = self.get_winner()",
"def next_player(self) -> None:\n self.player = (self.player + 1) % len(self.players)",
"def take_move_player_turn(self, move_player_fxn):\n x, y = self.player.x, self.player.y\n tiles = self.board.get_landable_tiles_around(x, y)\n target = random.choice(tiles)\n move_player_fxn(target.x, target.y)",
"def make_move(board, position, player):\n # only valid moves are passed in here\n board[position-1] = player",
"def movePlayerTo(self, target):\n if self.player:\n row = 1\n if not self.player.first: # player 1 or 2\n row = -1\n\n if self.player.king:\n if abs(target.row - self.row) == 1 and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if abs(target.row - self.row) == 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 2\n pass\n else:\n if target.row == self.row + row and abs(target.col - self.col) == 1: # move\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n return 1\n if target.row == self.row + row * 2 and abs(target.col - self.col) == 2: # eat\n mid = getBlockBetween(self, target)\n debugBoard()\n if mid.player and mid.player.first != self.player.first: # can eat\n mid.player = None\n target.player = self.player\n self.player = None\n self.diselect()\n target.checkKing()\n getGame().board.checkWin()\n return 2\n return 0",
"def turn(board: Board, n: int, _round: int = 1) -> int:\n\n player = 2 - _round % 2\n pawn_value = 1 if player is 1 else 3 # player's pawns value\n\n print(\"This is Player \" + str(player) + \"'s turn.\")\n display(board)\n\n x, y = select_square(board, n, pawn_value)\n update(board, pawn_value, x, y)\n\n if not_finish(board):\n return turn(board, n, _round+1)\n else:\n display(board)\n return player",
"def move(self, n):\n return self.file.seek(n, 0)",
"def move(self, p):\r\n self.position.setvalue(p)",
"def move(self, n: int) -> \"Linked[T]\":\n out = self\n if n >= 0:\n for _ in range(n):\n out = out.forward\n else:\n for _ in range(-n):\n out = out.backward\n return out",
"def move_player(self, delta):\n if self.player_pos + delta in MAP:\n self.player_pos = self.player_pos + delta",
"def move(self, row: int, col: int, player: int) -> int:\n s = -1 if player == 1 else 1\n\n self.rows[row] += s\n if abs(self.rows[row]) == self.n:\n return player\n\n self.cols[col] += s\n if abs(self.cols[col]) == self.n:\n return player\n\n if row == col:\n self.diagonals[0] += s\n if abs(self.diagonals[0]) == self.n:\n return player\n\n if (row + col) == self.n - 1:\n self.diagonals[1] += s\n if abs(self.diagonals[1]) == self.n:\n return player\n\n return 0",
"def move_to(self, x, y):\r\n self.__current_room = x, y",
"def switch_player(self):\n self.player = Nim.other_player(self.player)",
"def computer_move(board,move,player):\r\n com_execution(board, move, player)",
"def move(self, row, col, player):\n self.board[row][col] = player",
"def do_move(self, board):\n raise NotImplementedError(\"do_move method not implemented for Player: {}\".format(self))",
"def move_to(self, x, y):\n self.x = x\n self.y = y",
"def move_to(self, x, y):\n self.x = x\n self.y = y",
"def move_player_to(\r\n self,\r\n player_index,\r\n square_index,\r\n player=None,\r\n square=None,\r\n pass_on_squares=True,\r\n thr=None,\r\n process_square=True,\r\n ):\r\n if player is not None:\r\n player_index = self.get_player_index(player)\r\n\r\n if square is not None:\r\n square_index = self.get_square_index(square)\r\n\r\n print(\r\n self.full_turn_count,\r\n self.player_list[player_index],\r\n \"moving to\",\r\n self.squares[square_index],\r\n pass_on_squares,\r\n )\r\n\r\n if pass_on_squares:\r\n # we need to send the pass action to each square\r\n # determine the number of square we need to move\r\n moves = 0\r\n if self.player_positions[player_index] < square_index:\r\n moves = square_index - self.player_positions[player_index] - 1\r\n else:\r\n # then the number of moves is just the square_index (minus 1) and the move\r\n # to get to the Start square_index\r\n moves = (\r\n square_index\r\n - 1\r\n + (self.player_positions[player_index] - len(self.squares))\r\n )\r\n\r\n while moves > 0:\r\n self.player_positions[player_index] += 1\r\n if self.player_positions[player_index] >= len(self.squares):\r\n self.player_positions[player_index] = 0\r\n\r\n self.squares[self.player_positions[player_index]].pass_on(\r\n self, self.player_list[player_index]\r\n )\r\n\r\n moves -= 1\r\n\r\n # lastly increase the position to peform the land afterwards\r\n self.player_positions[player_index] += 1\r\n if self.player_positions[player_index] >= len(self.squares):\r\n self.player_positions[player_index] = 0\r\n\r\n else:\r\n # we mode directly to the new square\r\n self.player_positions[player_index] = square_index\r\n\r\n square = self.squares[square_index]\r\n player = self.player_list[player_index]\r\n\r\n if not process_square:\r\n return\r\n\r\n square.land(self, player, thr=thr)\r\n\r\n if square.is_owner(player):\r\n # check if the player should build a house\r\n if player.strategy.should_buy_house(self, player, square):\r\n # buy a house\r\n self.buy_house(player, square)\r\n return\r\n\r\n # determine if we should buy the property\r\n if not square.can_be_bought():\r\n return\r\n\r\n # check if the player wants to buy the property\r\n if player.strategy.should_buy_property(self, player, square):\r\n # buy it\r\n self.buy_property(player, square)\r\n return\r\n\r\n # make the auction\r",
"def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'",
"def move(self,amount):\n self.positionx=self.positionx+self.amount\n return self.positionx",
"def movePlayer(self,direction):\n if direction == Direction.UP:\n self.y -= 1\n elif direction == Direction.DOWN:\n self.y += 1\n elif direction == Direction.LEFT:\n self.x -= 1\n elif direction == Direction.RIGHT:\n self.x += 1",
"def __init__(self, n):\n count = collections.Counter()\n\n def move(row, col, player):\n for i, x in enumerate((row, col, row+col, row-col)):\n count[i, x, player] += 1\n if count[i, x, player] == n:\n return player\n return 0\n self.move = move",
"def move_to_position1(self):",
"def move(self, delta):\n newPos = (self._pos + delta) % self._board.size\n # check for Pass GO condition\n if delta > 0 and newPos < self._pos:\n self._cash += 200\n self._board.acceptNotification(notification.PNPassGo(self))\n\n self._pos = newPos\n self._board.acceptNotification(notification.PNPlayerMove(self))"
] |
[
"0.6816062",
"0.68137366",
"0.6647461",
"0.6624199",
"0.65511507",
"0.64370066",
"0.64218724",
"0.64007497",
"0.63077587",
"0.6304666",
"0.6243999",
"0.6195381",
"0.6159438",
"0.6115476",
"0.60905296",
"0.6081004",
"0.6080895",
"0.6078984",
"0.60673445",
"0.6060743",
"0.6038023",
"0.60282296",
"0.60282296",
"0.6027908",
"0.6021484",
"0.6017134",
"0.6016199",
"0.6014566",
"0.6003886",
"0.6001467"
] |
0.76031613
|
0
|
Remove the player from the board.
|
def remove_player(self, player):
print("REMOVING", player)
player_index = self.get_player_index(player)
# if we are the current player, move back the index once
if self.current_player == player_index:
self.current_player -= 1
if self.current_player < 0:
self.current_player = len(self.player_list) - 2
self.player_positions.pop(player_index)
self.player_list.pop(player_index)
        # TODO: put any cards owned by the player back into the cards list
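
A minimal sketch of the index bookkeeping in remove_player, using hypothetical stand-in lists rather than the original game class: after the pop the player list is one element shorter, which is why the wrap-around points at len(player_list) - 2 of the pre-pop list (the last player that will remain).

# Hypothetical illustration only; "alice"/"bob"/"carol" are stand-ins, not part of the original code.
player_list = ["alice", "bob", "carol"]
player_positions = [3, 7, 12]
current_player = 0  # "alice" is the active player and is being removed

player_index = player_list.index("alice")
if current_player == player_index:
    current_player -= 1
    if current_player < 0:
        # wrap to the last remaining player: the list is about to shrink by one
        current_player = len(player_list) - 2
player_positions.pop(player_index)
player_list.pop(player_index)

assert player_list[current_player] == "carol"  # the next advance lands on "bob"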
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_player(self, player):\n\t\tself.players.remove(player)",
"def remove_player_from_game(self, player):\n if player in self.players:\n cards = player.cards\n for card in cards:\n self.cards.append(card)\n\n self.__shuffle_cards()\n player.cards = []\n if player == self.current_player:\n self.choose_next_player()\n\n self.players.remove(player)",
"def removePlayer(self, player):\n self.players.remove(player)\n for observer in self.observers:\n observer.playerRemoved(player)",
"def removePlayer(self, player):\n\t\tfor i in range(len(self.playerList)):\n\t\t\tif self.playerList [i] == player:\n\t\t\t\tself.playerList[i] = None\n\t\t\t\treturn",
"def removePlayer(self, player):\n #if (not self.__configuring) and (player in self.__players):\n if (player in self.__players):\n self.__players.remove(player)\n for event in self.__events:\n if player in event:\n del event[player]\n player.unbind(self)",
"def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)",
"def removePlayer(self, index):\n serial = self.seats[index]\n self.seats[index]=0\n if serial in self.players:\n del self.players[serial]",
"def remove_card_from_player(self, card, player):\n if player in self.players:\n if card in player.cards:\n player.cards.remove(card)",
"def delPlayer(self, idx):\n self.players[idx:idx + 1] = []",
"def __remove_player(self, color):\n self.state.remove_player(color)\n self.violators.append(self.players[color])",
"def removePlayer(self, userid):\r\n userid = int(userid)\r\n if self.__contains__(userid):\r\n del self.players[userid].command \r\n # we have to manually delete the underlying object so we have no other references to PlayerObject class.\r\n del self.players[userid] # calls deconstructor on PlayerObject class\r",
"def remove_player(self, player):\n super().remove_player(player)\n if self._waiting_for_players:\n if self.__current_player == player:\n self.player_done(player)",
"def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)",
"def unset_player(self):\n self.server.object_manager.remove_player(self.player.guid)\n self.player = None",
"def _remove_player(self, player, player_party, other_party):\n\n party = vars(self)[player_party][:]\n party.remove(player)\n vars(self)[player_party].remove(player)\n for other in vars(self)[other_party]:\n if player in other.prefs:\n other.forget(player)",
"def remove_player(self, room_code: str, player_name: str) -> GameInfo:\n game = self.read_game(room_code)\n new_player_list = game.players.copy()\n\n matched_players = [\n player for player in game.players if player.name == player_name\n ]\n if len(matched_players) == 0:\n return game\n (player,) = matched_players\n\n new_player_list.remove(player)\n\n turn_player_name = game.turn_player_name\n if turn_player_name == player.name:\n # Pass to the next player\n player_index = game.players.index(player)\n if len(new_player_list) == 0:\n turn_player_name = None\n else:\n turn_player = new_player_list[player_index % len(new_player_list)]\n turn_player_name = turn_player.name\n\n self.games_table.update_item(\n Key={\"room_code\": room_code},\n UpdateExpression=(\"set turn_player_name=:t, players=:p\"),\n ExpressionAttributeValues={\n \":t\": turn_player_name,\n \":p\": [player.dict() for player in new_player_list],\n },\n ConditionExpression=Attr(\"players\").eq(game.dict()[\"players\"]),\n )\n\n self._determine_winner(room_code)\n\n return self.read_game(room_code)",
"def remove(self):\r\n game_ref.remove(self)",
"def remove_player(self, seat_id):\n player_id = seat_id\n try:\n idx = self._seats.index(self._player_dict[player_id])\n self._seats[idx] = Player(0, stack=0, emptyplayer=True)\n del self._player_dict[player_id]\n self.emptyseats += 1\n except ValueError:\n pass",
"def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)",
"def remove_from_hand(self):\n pass",
"def __delitem__(self, userid):\r\n self.removePlayer(userid)",
"def player_removes_tile(self, x, y):\n activePlayer = self.get_active_player()\n if activePlayer.humanControlled:\n super(RobotGame, self).player_removes_tile(x, y)",
"def remove_player(lst,player):\n print(\"Removing\",player)\n cursor=lst.head\n while cursor.data!=player:\n cursor=cursor.next\n if cursor==lst.head:\n cursor.next.prev=lst.tail\n cursor.prev.next=cursor.next\n lst.head=cursor.next\n if cursor==lst.tail:\n cursor.next.prev=cursor.prev\n cursor.prev.next=lst.head\n lst.tail=cursor.prev\n cursor.prev.next=cursor.next\n cursor.next.prev=cursor.prev\n lst.size-=1",
"async def deluser(self, ctx, member: discord.Member):\r\n for k, v in player.items():\r\n if k == member.name:\r\n del player[k]\r\n cur.execute(\"DELETE FROM players WHERE name=%s\", [k])\r\n conn.commit()\r\n await ctx.send(k + ' has been removed from the player-base')\r\n break",
"async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)",
"def removePlayer(self, color):\n if type(color) not in (player.Player, int):\n raise TypeError(\"Input to removePlayer must be of type int or Player.\")\n if type(color) is player.Player:\n color = color.getColor()\n if color not in self.__colordict__:\n raise IndexError(\"The input color/player was not found in the Rotator.\")\n # Empty the Rotator if this is the last node\n if len(self.__colordict__.keys()) <= 1:\n self.__colordict__ = {}\n self.__currentnode__ = None\n return\n if self.__currentnode__.getPlayer().getColor() == color:\n self.__currentnode__ = self.__currentnode__.getNext()\n node_to_remove = self.__colordict__[color]\n # Connect the previous node to the next node\n previous_node = node_to_remove.getPrevious()\n next_node = node_to_remove.getNext()\n previous_node.connectForward(next_node)\n next_node.connectBackward(previous_node)\n self.__colordict__.pop(color, None)",
"async def _clear_player(self, ctx: Context):\n\n await self.config.guild(ctx.guild).player_id.clear()\n\n await ctx.message.add_reaction(CHECK_MARK)",
"def remove_piece(self) -> None:\r\n if self.has_piece():\r\n self.piece.square = None\r\n self.piece = None",
"def detached(self, mind):\n self.remote = None\n players.remove(self)",
"def play(self, player, game):\n player.get_hand().get_cards().remove(self)\n card = game.pick_card()\n player.get_hand().add_cards(card)\n game.next_player()\n game.set_action(\"NO_ACTION\")"
] |
[
"0.8489572",
"0.80499756",
"0.77806044",
"0.76676995",
"0.76405525",
"0.7366387",
"0.7310438",
"0.72776914",
"0.7189522",
"0.71356475",
"0.7114337",
"0.7092161",
"0.7065603",
"0.7014058",
"0.6963003",
"0.69061786",
"0.68282884",
"0.66684633",
"0.66300315",
"0.6582116",
"0.65313494",
"0.64793503",
"0.6459942",
"0.6422543",
"0.64141464",
"0.6413147",
"0.64020264",
"0.64007354",
"0.6384578",
"0.63717717"
] |
0.81271565
|
1
|
Buy this property for the player.
|
def buy_property(self, player, square):
# pay the bank
print(player, "buying", square)
self.transaction_to_player(Bank(), -square.get_price(), player)
# set the owner on the property, and on the player
square.set_owner(player)
player.add_property(square)
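
A rough usage sketch of the effect buy_property is expected to have, using hypothetical FakeSquare/FakePlayer stand-ins (only the pieces the method touches): after the bank is debited the square's price, ownership is recorded on both the square and the player.

class FakeSquare:
    # hypothetical stand-in; only the pieces buy_property touches
    def __init__(self, price):
        self._price = price
        self.owner = None
    def get_price(self):
        return self._price
    def set_owner(self, player):
        self.owner = player

class FakePlayer:
    def __init__(self, name):
        self.name = name
        self.properties = []
    def add_property(self, square):
        self.properties.append(square)

player = FakePlayer("alice")
square = FakeSquare(200)

# the two ownership updates buy_property performs after paying the bank
square.set_owner(player)
player.add_property(square)

assert square.owner is player
assert square in player.properties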
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def buy_prop(player, prop):\r\n if player.money >= prop.price:\r\n player.money -= prop.price\r\n prop.owner = player\r\n player.properties.append(prop)\r\n return True\r\n else:\r\n return False",
"def buy_property(self, player_name, movement_manager):\n current_property_name = self.get_current_property_name(player_name, movement_manager)\n balance_before_purchase = self.get_balance(player_name)\n property_cost = self.get_property_price(current_property_name)\n new_balance = balance_before_purchase - property_cost\n\n if self.get_is_property_available(current_property_name) == \"yes\":\n if balance_before_purchase >= property_cost:\n self.db.write_value(\"is_available_for_purchase\", \"no\", current_property_name)\n self.db.write_value(\"owner\", player_name, current_property_name)\n self.db.write_value(\"money\", new_balance, player_name)\n else:\n print(\"You do not have enough money to buy this property\")\n else:\n print(\"This property is not available for purchase\")",
"def buyHealthPotion(self):\n\t\turl = \"https://habitica.com/api/v3/user/buy-health-potion\"\n\t\tresponse = postUrl(url, self.credentials)\n\t\tif response['success'] == True:\n\t\t\tself.hp += 15\n\t\treturn(response)",
"def doBuyIn(self):\n self.protocol.sendPacket(networkpackets.PacketPokerBuyIn(amount=self.max_buy_in, **self._serial_and_game_id))\n self.protocol.sendPacket(networkpackets.PacketPokerAutoBlindAnte(**self._serial_and_game_id))",
"def buy_house(self, player, square):\r\n # pay the bank\r\n house_price = square.get_house_price()\r\n if house_price is None:\r\n return\r\n\r\n print(player, \"buying a house on\", square)\r\n self.transaction_to_player(Bank(), -house_price, player)\r\n\r\n # set the owner on the house, and on the player\r\n square.buy_house()",
"def _buy(self, units=1):\n self.quantity -= units",
"def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()",
"def buy_shoppingitem(self, user_id, shoppinglist_id, item_id):\n item = self.get_shoppingitem(user_id, shoppinglist_id, item_id)\n if not item['bought']:\n item['bought'] = True",
"def buy(self, key):\n\t\turl = \"https://habitica.com/api/v3/user/buy/\" + str(key)\n\t\treturn(postUrl(url, self.credentials))",
"def buy_supplement(self, supplement):\n self._money = self._money - supplement.cards[supplement.size() - 1].cost\n card = supplement.pop()\n self._discard.push(card)\n self._strength = self._strength + card.attack\n print \"\\nSupplement bought:\\n%s\" % card",
"def buyTradedVal(self, buyTradedVal):\n\n self._buyTradedVal = buyTradedVal",
"def play(self):\n hand = self.state.hand\n supply = self.state.supply\n money = count_money(hand) - self.state.used_money\n if supply['Province'] > 0 and money >= Province.Cost:\n self.game_client.buy('Province')\n elif supply['Duchy'] > 0 and money >= Duchy.Cost:\n self.game_client.buy('Duchy')\n elif supply['Estate'] > 0 and money >= Estate.Cost:\n self.game_client.buy('Estate')\n\n self.game_client.done()",
"def buy_and_pay(self):\n return self.price",
"def perform(self, context):\r\n context.owner.spendPower(self.power)",
"async def enable_buy(self, pair: str, detection_name: str, trigger_data: dict):\n\n if not self.pair_states[pair]['enable_buy']:\n await self.reporter.send_alert(pair, trigger_data, detection_name, prefix='ENABLE BUY')\n self.pair_states[pair]['enable_buy'] = True",
"def target_sell_price(self):\n return super(Player, self).target_sell_price",
"def buy(self, stock, amount):\n self.orders[stock] += amount",
"async def _vis_buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if item:\n await ctx.send(vis_helpers.shop_buy(ctx.user_object, item, number))",
"def buyArmoire(self, key):\n\t\turl = \"https://habitica.com/api/v3/user/buy-armoire\"\n\t\treturn(postUrl(url, self.credentials))",
"async def buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if number and item:\n out = item_helpers.buy(ctx.author.id, item, number=number)\n await ctx.send(out)",
"def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)",
"def userBuyWeaponIndex(self, user : bbUser.bbUser, index : int):\n self.userBuyWeaponObj(user, self.weaponsStock[index].item)",
"def purchase(self, item_type):",
"def collect(self, player: Player):\n player.set_invincible(True)",
"def buyGemItem(self, itemType, key):\n\t\turl = \"https://habitica.com/api/v3/user/purchase/\" + itemType + \"/\" + key\n\t\treturn(postUrl(url, self.credentials))",
"def produce(self):\n\t\tself.stock = min(self.stock, self.stock_max)\n\t\tself.stock += self.site._exploit(self.skill)",
"def save(self, *args, **kwargs):\n self.stock_value = self.purchase_price * self.stock_level\n super().save(*args, **kwargs)",
"def buyGem(self, amount):\n returnVal = False\n if self.spendCoin(amount=25):\n self.gems += 1\n returnVal = True\n \n return returnVal",
"def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())",
"def userBuyWeaponObj(self, user : bbUser.bbUser, requestedWeapon : bbWeapon.bbWeapon):\n if self.userCanAffordItemObj(user, requestedWeapon):\n self.weaponsStock.removeItem(requestedWeapon)\n user.credits -= requestedWeapon.getValue()\n user.inactiveShips.addItem(requestedWeapon)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy weapon \" + requestedWeapon.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedWeapon.getValue()))"
] |
[
"0.73201454",
"0.71901083",
"0.6011373",
"0.58200026",
"0.5769583",
"0.5717164",
"0.5642328",
"0.56227255",
"0.55898637",
"0.5552486",
"0.55257046",
"0.54936653",
"0.54706025",
"0.5382395",
"0.538109",
"0.53658086",
"0.5355954",
"0.5347377",
"0.5340484",
"0.533603",
"0.53320146",
"0.5325702",
"0.5311242",
"0.53103834",
"0.5302985",
"0.5289183",
"0.5203997",
"0.5185391",
"0.516979",
"0.5159044"
] |
0.757466
|
0
|
Buy this house for the player.
|
def buy_house(self, player, square):
# pay the bank
house_price = square.get_house_price()
if house_price is None:
return
print(player, "buying a house on", square)
self.transaction_to_player(Bank(), -house_price, player)
# set the owner on the house, and on the player
square.buy_house()
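
A small sketch of the None guard in buy_house, under the assumption (not stated explicitly in the source) that squares which cannot carry houses report get_house_price() as None, making the purchase a no-op. The classes below are hypothetical stand-ins.

class NoHouseSquare:
    # hypothetical stand-in for a square that cannot take houses
    def get_house_price(self):
        return None

class StreetSquare:
    # hypothetical stand-in for a buildable street
    def __init__(self, house_price):
        self._house_price = house_price
        self.houses = 0
    def get_house_price(self):
        return self._house_price
    def buy_house(self):
        self.houses += 1

def try_buy_house(square):
    price = square.get_house_price()
    if price is None:
        return False  # mirrors the early return in buy_house
    square.buy_house()
    return True

assert try_buy_house(NoHouseSquare()) is False
street = StreetSquare(50)
assert try_buy_house(street) is True and street.houses == 1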
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def doBuyIn(self):\n self.protocol.sendPacket(networkpackets.PacketPokerBuyIn(amount=self.max_buy_in, **self._serial_and_game_id))\n self.protocol.sendPacket(networkpackets.PacketPokerAutoBlindAnte(**self._serial_and_game_id))",
"def buy_property(self, player, square):\r\n # pay the bank\r\n print(player, \"buying\", square)\r\n self.transaction_to_player(Bank(), -square.get_price(), player)\r\n\r\n # set the owner on the property, and on the player\r\n square.set_owner(player)\r\n player.add_property(square)",
"def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()",
"def play(self):\n hand = self.state.hand\n supply = self.state.supply\n money = count_money(hand) - self.state.used_money\n if supply['Province'] > 0 and money >= Province.Cost:\n self.game_client.buy('Province')\n elif supply['Duchy'] > 0 and money >= Duchy.Cost:\n self.game_client.buy('Duchy')\n elif supply['Estate'] > 0 and money >= Estate.Cost:\n self.game_client.buy('Estate')\n\n self.game_client.done()",
"async def buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if number and item:\n out = item_helpers.buy(ctx.author.id, item, number=number)\n await ctx.send(out)",
"def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)",
"def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)",
"async def _vis_buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if item:\n await ctx.send(vis_helpers.shop_buy(ctx.user_object, item, number))",
"def buy(self, key):\n\t\turl = \"https://habitica.com/api/v3/user/buy/\" + str(key)\n\t\treturn(postUrl(url, self.credentials))",
"async def buy(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n price = self.iex.price(symbol)\r\n cost = quantity * price\r\n if company.balance < cost:\r\n await ctx.send(f\"{company.name}\\nBalance: {company.balance} USD\\nPurchase cost: {cost} USD\")\r\n raise StonksError()\r\n\r\n value = price * quantity\r\n self.iex.buy(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``-{value} {company.name} ⯮ {quantity} {symbol} @ {price}``\")",
"def buy_property(self, player_name, movement_manager):\n current_property_name = self.get_current_property_name(player_name, movement_manager)\n balance_before_purchase = self.get_balance(player_name)\n property_cost = self.get_property_price(current_property_name)\n new_balance = balance_before_purchase - property_cost\n\n if self.get_is_property_available(current_property_name) == \"yes\":\n if balance_before_purchase >= property_cost:\n self.db.write_value(\"is_available_for_purchase\", \"no\", current_property_name)\n self.db.write_value(\"owner\", player_name, current_property_name)\n self.db.write_value(\"money\", new_balance, player_name)\n else:\n print(\"You do not have enough money to buy this property\")\n else:\n print(\"This property is not available for purchase\")",
"def buy(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.buy(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || BUY %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))",
"def _buy(self, units=1):\n self.quantity -= units",
"def buy_prop(player, prop):\r\n if player.money >= prop.price:\r\n player.money -= prop.price\r\n prop.owner = player\r\n player.properties.append(prop)\r\n return True\r\n else:\r\n return False",
"def hook_buy_card(self, game, player, card):\n if card.isVictory():\n player.output(\"Gaining Gold from Hoard\")\n player.add_card(game[\"Gold\"].remove())",
"def buy(self, stock, amount):\n self.orders[stock] += amount",
"def buy_shoppingitem(self, user_id, shoppinglist_id, item_id):\n item = self.get_shoppingitem(user_id, shoppinglist_id, item_id)\n if not item['bought']:\n item['bought'] = True",
"def buyGemItem(self, itemType, key):\n\t\turl = \"https://habitica.com/api/v3/user/purchase/\" + itemType + \"/\" + key\n\t\treturn(postUrl(url, self.credentials))",
"def Buy(self, X, Y):\n if self.money - (int(Y) * self.price[X][0] * (1 + self.taxe)) < 0:\n raise TradeError(\"Not Enough Money\")\n self.share[X] += int(Y)\n self.money -= int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"BUY:{str(int(Y))}:{str(X)}\", flush = True)",
"def buy(self, price, chosen_class):\n return self.customer_classes[chosen_class - 1].buy(price)",
"def buy(self, bar, volume):\n self.place(Order(symbol=bar.symbol,\n volume=volume,\n price=bar.close,\n transaction=TransactionType.BUY,\n timestamp=bar.timestamp))",
"def market_buy(self, order_id, quantity):\n Library.functions.market_buy(self._book, order_id, quantity)",
"def buyQuest(self, key):\n\t\turl = \"https://habitica.com/api/v3/user/buy-quest/\" + str(key)\n\t\treturn(postUrl(url, self.credentials))",
"async def buy(self, ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Buy(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()",
"def buy_and_pay(self):\n return self.price",
"async def _buy_live(self, pair: str, label: str, detection_name: str,\n trigger_data: Dict[str, Any], rebuy=False):\n\n params = core.Detector.get_detection_params(detection_name, {\n 'push_target': config['trade_push_sell_percent'],\n 'soft_target': config['trade_soft_sell_percent'],\n 'hard_target': config['trade_hard_sell_percent'],\n 'push_max': config['trade_push_max'],\n 'soft_max': config['trade_soft_max'],\n 'stop_percent': config['trade_stop_percent'],\n 'stop_cutoff': config['trade_stop_cutoff'],\n 'stop_check': config['trade_stop_check'],\n 'deferred_push': config['trade_deferred_push_sell'],\n 'deferred_soft': config['trade_deferred_soft_sell'],\n 'deferred_hard': config['trade_deferred_hard_sell'],\n 'groups': ['default']\n })\n\n base, _, trade_base_pair = common.get_pair_elements(pair)\n trade_size = self.trade_sizes[params['groups'][0]]\n\n if config['trade_balance_sync']:\n reserved = await self._get_open_trades_value(trade_base_pair)\n await self.balancer.handle_refill_request(base, trade_size, reserved)\n await self._garbage_collect_live(base, trade_size, reserved)\n\n order_id, quantity = await self._submit_limit_buy(pair, trade_size)\n\n if order_id is None:\n self.log.warning(\"Could not open buy order for {}.\", pair)\n await self.reporter.send_alert(pair, trigger_data, detection_name, prefix='FAIL BUY')\n success = False\n else:\n success = True\n\n if not success:\n self.trade_stats[self.time_prefix][pair]['failed'] += 1\n return None\n\n await self._register_trade_buy(pair, label, detection_name, trigger_data, rebuy)\n\n if not config['trade_balance_sync']:\n reserved = await self._get_open_trades_value(trade_base_pair)\n await self.balancer.handle_refill_request(base, trade_size, reserved)\n await self._garbage_collect_live(base, trade_size + config['trade_min_size'], reserved)\n\n adjusted_value = self.market.adjusted_close_values[pair][-1]\n current_time = self.market.close_times[pair][-1]\n\n order = {\n 'pair': pair,\n 'order_id': order_id,\n 'open_value': adjusted_value,\n 'base_value': await self.market.get_pair_base_mult(config['trade_base'], pair),\n 'quantity': quantity,\n 'remaining': quantity,\n 'filled': False,\n 'fees': 0.0,\n 'sell_pushes': 0,\n 'push_locked': True,\n 'soft_stops': 0,\n 'soft_sells': [],\n 'hard_sells': [],\n 'hard_stops': [],\n 'base_soft_stops': [],\n 'rebuy': rebuy,\n 'open_time': current_time,\n 'detection_name': detection_name,\n 'detection_time': trigger_data['current_time'],\n 'push_target': adjusted_value * (1.0 + params['push_target']),\n 'soft_target': adjusted_value * (1.0 + params['soft_target']),\n 'hard_target': adjusted_value * (1.0 + params['hard_target']),\n 'stop_value': adjusted_value * (1.0 - params['stop_percent']),\n 'cutoff_value': adjusted_value * (1.0 - params['stop_cutoff']),\n 'check_value': adjusted_value * (1.0 - params['stop_check']),\n 'push_max': params['push_max'],\n 'soft_max': params['soft_max'],\n 'stop_percent': params['stop_percent'],\n 'stop_cutoff': params['stop_cutoff'],\n 'stop_check': params['stop_check'],\n 'deferred_push': params['deferred_push'],\n 'deferred_soft': params['deferred_soft'],\n 'deferred_hard': params['deferred_hard'],\n 'groups': params['groups']\n }\n\n return order",
"def do_buy(self, args):\n if not self._check_args(args):\n return\n else:\n self.wallet.get_coins_from_faucet(args)",
"def payout(self):\n self.close()\n if self.is_paid:\n raise ValueError(\"Already paid out the wager.\")\n self.is_paid = True\n self.paid_on = datetime.datetime.now()\n payouts = self.get_payout_information()\n for info in payouts:\n bet, credits = info[\"bet\"], info[\"won\"]\n player = bet.created_by\n player.credits += credits\n player.save()\n self.save()",
"def buyGem(self, amount):\n returnVal = False\n if self.spendCoin(amount=25):\n self.gems += 1\n returnVal = True\n \n return returnVal",
"def buy(self,\n currency_pair,\n rate,\n amount):\n pass"
] |
[
"0.6828917",
"0.6799409",
"0.67829067",
"0.6736817",
"0.67134225",
"0.6636116",
"0.65728295",
"0.6543972",
"0.6479058",
"0.6419585",
"0.6382085",
"0.63807803",
"0.6303579",
"0.62601125",
"0.6240134",
"0.6233498",
"0.62123346",
"0.62091625",
"0.61930096",
"0.6190045",
"0.6150108",
"0.60905373",
"0.60627055",
"0.6055995",
"0.60335016",
"0.60294396",
"0.6028002",
"0.6005993",
"0.6002703",
"0.5989814"
] |
0.7949733
|
0
|
Perform a transaction between two players.
|
def transaction_to_player(self, origin, amount, receiver):
print("Transfering", amount, origin, "->", receiver)
try:
origin.transfer(-amount)
        except Exception:
            # the origin would become bankrupt, so declare it and remove it from the game
origin.set_bankrupt(self, receiver)
self.remove_player(origin)
return
try:
receiver.transfer(amount)
        except Exception:
            # the receiver would become bankrupt, so declare it
receiver.set_bankrupt(self, origin)
self.remove_player(receiver)
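
A minimal, self-contained sketch of the convention transaction_to_player appears to rely on, using a stand-in BrokePlayer rather than the real Player/Bank classes: transfer is assumed to raise when the balance would go negative, and the method converts that exception into a bankruptcy declaration and removal.

class BrokePlayer:
    # hypothetical stand-in for a player/bank-like participant
    def __init__(self, name, cash):
        self.name = name
        self.cash = cash
        self.bankrupt = False
    def transfer(self, amount):
        # assumed convention from the original code: raise if the balance would go negative
        if self.cash + amount < 0:
            raise ValueError("insufficient funds")
        self.cash += amount
    def set_bankrupt(self, game, creditor):
        self.bankrupt = True

payer = BrokePlayer("alice", 50)
payee = BrokePlayer("bob", 0)

# paying 100 of rent: origin.transfer(-100) raises, so the payer is declared bankrupt
try:
    payer.transfer(-100)
except Exception:
    payer.set_bankrupt(None, payee)

assert payer.bankrupt and payee.cash == 0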
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200",
"def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}",
"def transact(self, transaction_type, digicoins_No):\n\n #Raise an exception of digicoins_No is not multiple of 10.\n try:\n if digicoins_No % 10 != 0:\n raise MyError.MyError(digicoins_No)\n except Exception as inst:\n print \"\\nYou can only transact multiples of 10 of digicoins.\\nTransaction Failed!\"\n return\n\n lowest_price = 0\n digicoins_remain = digicoins_No\n while digicoins_remain > 0:\n if digicoins_remain > 100:\n digicoins_No_to_be_transacted = 100\n else:\n digicoins_No_to_be_transacted = digicoins_remain\n\n A_price = self.Broker1.offered_price(digicoins_No_to_be_transacted)\n B_price = self.Broker2.offered_price(digicoins_No_to_be_transacted)\n\n if A_price < B_price:\n self.Broker1.execute_transaction(digicoins_No_to_be_transacted)\n lowest_price += A_price\n else:\n self.Broker2.execute_transaction(digicoins_No_to_be_transacted)\n lowest_price += B_price\n digicoins_remain -= 100\n\n if transaction_type == \"BUY\":\n print self.name, \"buys\", digicoins_No_to_be_transacted, \"at\", lowest_price\n #update the clients list with a pair [price, digicoins]\n self.transactions.append([lowest_price, digicoins_No])\n else:\n print self.name, \"sells\", digicoins_No_to_be_transacted, \"at\", lowest_price\n self.transactions.append([lowest_price, -digicoins_No])",
"def trade(self, send_Wallet, recv_Wallet, amount):\n # send_balance = self.web3.eth.get_balance(send_Wallet.address)\n # recv_balance = self.web3.eth.get_balance(recv_Wallet.address)\n # Transaction sequence moving from send_Wallet to rcv_Wallet\n print('{s:{c}^{n}}'.format(s=' creating transaction data ', n=80, c='.'))\n txn = self.mk_simple_transaction(send_Wallet.address, recv_Wallet.address, amount)\n print(txn)\n print('{s:{c}^{n}}'.format(s=' signing transaction ', n=80, c='.'))\n signed_txn = self.sign_transaction(txn, send_Wallet.prvkey)\n print(\"signed transaction hash = {}\".format(signed_txn))\n print('{s:{c}^{n}}'.format(s=' sending transaction ', n=80, c='.'))\n txn_hash = self.send_transaction(signed_txn)\n print(\"transaction hash = {}\".format(txn_hash))\n print('{s:{c}^{n}}'.format(s=' getting transaction receipt ', n=80, c='.'))\n receipt = self.wait_for_receipt(txn_hash)\n # pdb.set_trace()\n print(receipt)\n print('{s:{c}^{n}}'.format(s=' getting block transaction was a part of ', n=80, c='.')) \\\n # realistically this part of confirming the status of the block & transaction (mined or not)\n # might be able to be checked using the reciept? Not sure though\n # Answer : Looks like once we get a receipt from the transaction, the transaction will have\n # been completed and added to the ledger (aka block is mined i believe)\n block = self.get_tnx_block(receipt.blockNumber)\n # above line for getting transactino block is flakey...\n # not sure why, but the error that gets raised is as follows\n # raise BlockNotFound(f\"Block with id: {block_identifier} not found.\")\n # web3.exceptions.BlockNotFound: Block with id: 0x9c5b5d not found.\n print(block)",
"def transaction_to_player_from_all(self, amount, receiver):\r\n print(\"Transfering\", amount, \"From all players ->\", receiver)\r\n i = 0\r\n while i < len(self.player_list):\r\n if self.player_list[i] is not receiver:\r\n self.transaction_to_player(self.player_list[i], amount, receiver)\r\n i += 1",
"def _trade_orders(cls, first: 'Order', second: 'Order') -> (\n 'Order',\n 'Order',\n InstrumentBalance,\n InstrumentBalance,\n FiatBalance,\n FiatBalance,\n ):\n # TODO Add fee\n with transaction.atomic():\n trade_amount = min(first.remaining_sum, second.remaining_sum)\n first_balance = InstrumentBalance.objects.select_for_update().get(\n user=first.user, instrument=first.instrument)\n second_balance = InstrumentBalance.objects.select_for_update().get(\n user=second.user, instrument=first.instrument)\n first_fiat_balance = FiatBalance.objects.select_for_update().get(\n user=first.user)\n second_fiat_balance = FiatBalance.objects.select_for_update().get(\n user=second.user)\n if not first_balance:\n raise ValueError(\n f'Balance for user {first.user} in instrument not found')\n if not second_balance:\n raise ValueError(\n f'Balance for user {second.user} in instrument not found')\n if first.type == OrderType.BUY.value and first_fiat_balance.amount < trade_amount * second.price:\n raise ValueError(\n f'Not enough funds for {first_fiat_balance.user}')\n if first.type == OrderType.SELL.value and second_fiat_balance.amount < trade_amount * second.price:\n raise ValueError(\n f'Not enough funds for {second_fiat_balance.user}')\n if first.type == OrderType.BUY.value and second_balance.amount < trade_amount:\n raise ValueError(\n f'Not enough instrument balance for {second_balance.user}')\n if first.type == OrderType.SELL.value and first_balance.amount < trade_amount:\n raise ValueError(\n f'Not enough instrument balance for {first_balance.user}')\n first.remaining_sum -= trade_amount\n second.remaining_sum -= trade_amount\n if first.type == OrderType.BUY.value:\n first_balance.amount += trade_amount\n second_balance.amount -= trade_amount\n first_fiat_balance.amount -= trade_amount * second.price\n second_fiat_balance.amount += trade_amount * second.price\n else:\n first_balance.amount -= trade_amount\n second_balance.amount += trade_amount\n first_fiat_balance.amount += trade_amount * second.price\n second_fiat_balance.amount -= trade_amount * second.price\n if first.remaining_sum == 0:\n first.status = OrderStatus.COMPLETED.value\n first.actual_price = second.price\n if second.remaining_sum == 0:\n second.status = OrderStatus.COMPLETED.value\n second.actual_price = second.price\n return first, second, first_balance, second_balance, first_fiat_balance, second_fiat_balance",
"def transaction_run():\n print('working...')\n # Get all transaction\n transactions = executor.submit(Transaction.query.filter_by(done=False).all)\n print(transactions.result())\n # Check if thier a transactions\n if transactions.result():\n # Go through each transaction\n for tran in transactions.result():\n print(\"Looping...\")\n # print(trans)\n # Get the currency account for the source user\n currency = executor.submit(Currency.query.filter_by(user_id=tran.user_id).first).result()\n print(currency)\n # target_user = executor.submit(User.query.filter_by(id=tran.target_user).first).result()\n # print(target_user)\n # Get the currency account for the target user\n target = executor.submit(Currency.query.filter_by(user_id=tran.target_user).first).result()\n # Get the transaction account for the target user\n trans_target = executor.submit(Transaction.query.filter_by(user_id=tran.target_user).first).result()\n ### # TODO:\n trans_source = executor.submit(Transaction.query.filter_by(user_id=tran.user_id).first).result()\n # update replace all tran with trans_source\n\n print(tran)\n # print(target_user)\n print(target)\n print(trans_target)\n # Check if the target user has account\n if target:\n # If the user send to himself fail the transaction\n if tran.user_id == tran.target_user:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n else:\n # If the currency type is bitcoin\n # Check if the user has a bitcoin ID\n if tran.currency_Type.lower() == \"bitcoin\":\n if not currency.bitcoin_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have a bitcoin account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a bitcoin ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.bitcoin_balance:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target user\n else:\n balance = currency.bitcoin_balance - tran.currency_amount\n # updated_balance = str(balance)\n currency.bitcoin_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.bitcoin_balance + tran.currency_amount\n target.bitcoin_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n # If the currency type is ethereum\n # Check if the user has a ethereum ID\n elif tran.currency_Type.lower() == \"ethereum\":\n if not currency.ethereum_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You don't have a ethereum account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a ethereum ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.ethereum_balance:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have enough money!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You exceed the max amount!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target\n else:\n balance = currency.ethereum_balance - tran.currency_amount\n currency.ethereum_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.ethereum_balance + tran.currency_amount\n target.ethereum_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # if the currency type not bitcoin or ethereum\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If the user has no currency account\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n\n # Finish the transaction request\n print(tran)\n tran.done = True\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n print('Done!!!!')",
"def create_tx(self, coin, account, to, amount):\n if coin is ETH:\n gasEstimate = self.w3.eth.estimateGas(\n {\"from\": account.address, \"to\": to, \"value\": amount}\n )\n return {\n \"from\": account.address,\n \"to\": to,\n \"value\": self.w3.toWei(amount, 'ether'),\n \"gasPrice\": self.w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": self.w3.eth.getTransactionCount(account.address),\n }\n elif coin is BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(to, amount, BTC)])\n elif coin is BTC:\n return PrivateKey.prepare_transaction(account.address, [(to, amount, BTC)])\n else:\n return None",
"def __call__(self, account_from: Optional[str] = None, account_to: Optional[str] = None, accounts: Optional[str] = None, amount: Optional[Union[int, float, Decimal]] = None, currency: Optional[str] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):\n\n data = {\n \"transfer_between_accounts\": int(1)\n }\n\n if account_from:\n data['account_from'] = str(account_from)\n\n if account_to:\n data['account_to'] = str(account_to)\n\n if accounts:\n data['accounts'] = str(accounts)\n\n if amount:\n data['amount'] = amount\n\n if currency:\n data['currency'] = str(currency)\n\n return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)",
"def transact(self, args):\n private_key = os.environ.get('private_key')\n if private_key:\n set_gas_prices(self.w3, args)\n tx = send(self.w3, private_key, args)\n return self.w3.toHex(tx)",
"def miner_main_send_tx(miner):\n if miner.pubkey in miner.balance:\n if miner.balance[miner.pubkey] > 10:\n other = random.choice(miner.peers)\n miner.create_transaction(other[\"pubkey\"], 10)\n print(f\"Miner {miner.name} sent transaction to {other['name']}\")",
"def update_wallets_and_transaction(sender, instance, created, **kwargs):\n if created:\n from_wallet = update_from_wallet(instance)\n to_wallet = update_to_wallet(instance)\n update_transaction_profit(instance, from_wallet, to_wallet)",
"def transaction(self, symbol, ask, bid):\n\n\t\t# Stock to trade\n\t\tstock = self.data.get(symbol)\n\n\t\tif stock.position == 1:\n\t\t\t# DLA ZADANIA 2:\n\t\t\t# Przed zakupem należy zamknąć sprzedaż\n\t\t\tself.close_sell(symbol, ask)\n\t\t\tself.open_buy(symbol, ask)\n\t\n\n\t\tif stock.position == -1:\n\t\t\t# DLA ZADANIA 2:\n\t\t\t# Po zamknięciu sprzedaży rozpoczynami zakupy\n\t\t\tself.close_buy(symbol, bid)\n\t\t\tself.open_sell(symbol, bid)",
"async def trade(self):\n\t\ttradeable_p = []\n\t\ttradeable_partner = []\n\t\tmoney_p = 0\n\t\tmoney_partner = 0\n\t\tgoojf_p = 0\n\t\tgoojf_partner = 0\n\t\tcolors = {\n\t\t\t1: 'Brown', 3: 'Brown',\n\t\t\t6: 'Light Blue', 8: 'Light Blue', 9: 'Light Blue',\n\t\t\t11: 'Pink', 13: 'Pink', 14: 'Pink',\n\t\t\t16: 'Orange', 18: 'Orange', 19: 'Orange',\n\t\t\t21: 'Red', 23: 'Red', 24: 'Red',\n\t\t\t26: 'Yellow', 27: 'Yellow', 29: 'Yellow',\n\t\t\t31: 'Green', 32: 'Green', 34: 'Green',\n\t\t\t37: 'Dark Blue', 39: 'Dark Blue',\n\t\t\t5: 'Railroad', 15: 'Railroad', 25: 'Railroad', 35: 'Railroad', \n\t\t\t12: 'Utility', 28: 'Utility'\n\t\t}\n\t\tmsg = '```\\n'\n\t\tfor a in range(self.num):\n\t\t\tif self.isalive[a] and a != self.p:\n\t\t\t\tmem = await self.get_member(self.uid[a])\n\t\t\t\tname = mem.display_name\n\t\t\t\tmsg += f'{a} {name}\\n'\n\t\tmsg += '```Select the player you want to trade with.\\n`c`: Cancel'\n\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\tawait self.ctx.send(msg)\n\t\tdef tradecheck(m):\n\t\t\tif m.author.id == self.uid[self.p] and m.channel == self.ctx.channel:\n\t\t\t\ttry:\n\t\t\t\t\tm = int(m.content)\n\t\t\t\texcept Exception:\n\t\t\t\t\tif m.content.lower() == 'c':\n\t\t\t\t\t\treturn True\n\t\t\t\t\treturn False\n\t\t\t\tif 0 <= m < self.num and self.isalive[m] and m != self.p:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tchoice = await self.bot.wait_for(\n\t\t\t'message',\n\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\tcheck=tradecheck\n\t\t)\n\t\tchoice = choice.content.lower()\n\t\tif choice == 'c':\n\t\t\treturn\n\t\tpartner = int(choice)\n\t\tfor a in range(40):\n\t\t\t#properties cannot be traded if any property in their color group has a house\n\t\t\tgroupHasHouse = False\n\t\t\tfor group in PROPGROUPS:\n\t\t\t\tif a in group:\n\t\t\t\t\tif any(self.numhouse[prop] not in (-1, 0) for prop in group):\n\t\t\t\t\t\tgroupHasHouse = True\n\t\t\tif groupHasHouse:\n\t\t\t\tcontinue\n\t\t\tif self.ownedby[a] == self.p:\n\t\t\t\ttradeable_p.append(a)\n\t\t\telif self.ownedby[a] == partner:\n\t\t\t\ttradeable_partner.append(a)\n\t\tto_trade_p = [False for _ in range(len(tradeable_p))]\n\t\tto_trade_partner = [False for _ in range(len(tradeable_partner))]\n\t\tmsg = ''\n\t\twhile True:\n\t\t\tmsg += '```\\nid sel color name\\n'\n\t\t\tfor a in range(len(tradeable_p)):\n\t\t\t\tif to_trade_p[a]:\n\t\t\t\t\tmsg += '{:2} + {:10} {}\\n'.format(\n\t\t\t\t\t\ta, colors[tradeable_p[a]], TILENAME[tradeable_p[a]]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tmsg += '{:2} {:10} {}\\n'.format(\n\t\t\t\t\t\ta, colors[tradeable_p[a]], TILENAME[tradeable_p[a]]\n\t\t\t\t\t)\n\t\t\tmsg += '\\n'\n\t\t\tif money_p != 0:\n\t\t\t\tmsg += f'${money_p}\\n'\n\t\t\tif goojf_p == 1:\n\t\t\t\tmsg += '1 get out of jail free card.\\n'\n\t\t\telif goojf_p != 0:\n\t\t\t\tmsg += f'{goojf_p} get out of jail free cards.\\n'\n\t\t\tmsg += (\n\t\t\t\t'```Type the ID of any property you want to toggle trading to them.\\n'\n\t\t\t\t'`m`: Give money\\n`j`: Give get out of jail free cards\\n`d`: Done\\n`c`: Cancel'\n\t\t\t)\n\t\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\t\tawait self.ctx.send(msg)\n\t\t\tvalid = [str(x) for x in range(len(tradeable_p))] + ['m', 'j', 'd', 'c']\n\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t'message',\n\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\tcheck=lambda m: (\n\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\tand 
m.content.lower() in valid\n\t\t\t\t)\n\t\t\t)\n\t\t\tchoice = choice.content.lower()\n\t\t\tif choice == 'm':\n\t\t\t\tawait self.ctx.send(f'How much money? You have ${self.bal[self.p]}.')\n\t\t\t\tmoney = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\ttry:\n\t\t\t\t\tmoney = int(money.content)\n\t\t\t\texcept:\n\t\t\t\t\tmsg = 'You need to specify a number.\\n'\n\t\t\t\telse:\n\t\t\t\t\tif money > self.bal[self.p]:\n\t\t\t\t\t\tmsg = 'You do not have that much money.\\n'\n\t\t\t\t\telif money < 0:\n\t\t\t\t\t\tmsg = 'You cannot give a negative amount of money.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmoney_p = money\n\t\t\t\t\t\tmsg = ''\n\t\t\telif choice == 'j':\n\t\t\t\tif self.goojf[self.p] == 0:\n\t\t\t\t\tmsg = 'You do not have any get out of jail free cards to give.\\n'\n\t\t\t\t\tcontinue\n\t\t\t\tawait self.ctx.send(f'How many? You have {self.goojf[self.p]}.')\n\t\t\t\tcards = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\ttry:\n\t\t\t\t\tcards = int(cards.content)\n\t\t\t\texcept:\n\t\t\t\t\tmsg = 'You need to specify a number.\\n'\n\t\t\t\telse:\n\t\t\t\t\tif cards > self.goojf[self.p]:\n\t\t\t\t\t\tmsg = 'You do not have that many get out of jail free cards.\\n'\n\t\t\t\t\telif cards < 0:\n\t\t\t\t\t\tmsg = 'You cannot give a negative amount of get out of jail free cards.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tgoojf_p = cards\n\t\t\t\t\t\tmsg = ''\n\t\t\telif choice == 'd':\n\t\t\t\tbreak\n\t\t\telif choice == 'c':\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tchoice = int(choice)\n\t\t\t\tto_trade_p[choice] = not to_trade_p[choice]\n\t\t\t\tmsg = ''\n\t\tmsg = ''\n\t\twhile True:\n\t\t\tmsg += '```\\nid sel color name\\n'\n\t\t\tfor a in range(len(tradeable_partner)):\n\t\t\t\tif to_trade_partner[a]:\n\t\t\t\t\tmsg += '{:2} + {:10} {}\\n'.format(\n\t\t\t\t\t\ta, colors[tradeable_partner[a]], TILENAME[tradeable_partner[a]]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tmsg += '{:2} {:10} {}\\n'.format(\n\t\t\t\t\t\ta, colors[tradeable_partner[a]], TILENAME[tradeable_partner[a]]\n\t\t\t\t\t)\n\t\t\tmsg += '\\n'\n\t\t\tif money_partner != 0:\n\t\t\t\tmsg += f'${money_partner}\\n'\n\t\t\tif goojf_partner == 1:\n\t\t\t\tmsg += '1 get out of jail free card.\\n'\n\t\t\telif goojf_partner != 0:\n\t\t\t\tmsg += f'{goojf_partner} get out of jail free cards.\\n'\n\t\t\tmsg += (\n\t\t\t\t'```Type the ID of any property you want '\n\t\t\t\t'to toggle requesting them to trade to you.\\n'\n\t\t\t\t'`m`: Request money\\n`j`: Request get out of jail free cards\\n'\n\t\t\t\t'`d`: Done\\n`c`: Cancel'\n\t\t\t)\n\t\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\t\tawait self.ctx.send(msg)\n\t\t\tvalid = [str(x) for x in range(len(tradeable_partner))] + ['m', 'j', 'd', 'c']\n\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t'message',\n\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\tcheck=lambda m: (\n\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\tand m.content.lower() in valid\n\t\t\t\t)\n\t\t\t)\n\t\t\tchoice = choice.content.lower()\n\t\t\tif choice == 
'm':\n\t\t\t\tawait self.ctx.send(f'How much money? They have ${self.bal[partner]}.')\n\t\t\t\tmoney = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\ttry:\n\t\t\t\t\tmoney = int(money.content)\n\t\t\t\texcept:\n\t\t\t\t\tmsg = 'You need to specify a number.\\n'\n\t\t\t\telse:\n\t\t\t\t\tif money > self.bal[partner]:\n\t\t\t\t\t\tmsg = 'They do not have that much money.\\n'\n\t\t\t\t\telif money < 0:\n\t\t\t\t\t\tmsg = 'You cannot take a negative amount of money.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmoney_partner = money\n\t\t\t\t\t\tmsg = ''\n\t\t\telif choice == 'j':\n\t\t\t\tif self.goojf[partner] == 0:\n\t\t\t\t\tmsg = 'They do not have any get out of jail free cards to give.\\n'\n\t\t\t\t\tcontinue\n\t\t\t\tawait self.ctx.send(f'How many? They have {self.goojf[partner]}.')\n\t\t\t\tcards = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\ttry:\n\t\t\t\t\tcards = int(cards.content)\n\t\t\t\texcept:\n\t\t\t\t\tmsg = 'You need to specify a number.\\n'\n\t\t\t\telse:\n\t\t\t\t\tif cards > self.goojf[partner]:\n\t\t\t\t\t\tmsg = 'They do not have that many get out of jail free cards.\\n'\n\t\t\t\t\telif money < 0:\n\t\t\t\t\t\tmsg = 'You cannot take a negative amount of get out of jail free cards.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tgoojf_partner = cards\n\t\t\t\t\t\tmsg = ''\n\t\t\telif choice == 'd':\n\t\t\t\tbreak\n\t\t\telif choice == 'c':\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tchoice = int(choice)\n\t\t\t\tto_trade_partner[choice] = not to_trade_partner[choice]\n\t\t\t\tmsg = ''\n\t\thold_p = ''\n\t\thold_partner = ''\n\t\tfor a in range(len(tradeable_p)):\n\t\t\tif to_trade_p[a]:\n\t\t\t\thold_p += '{:10} {}\\n'.format(\n\t\t\t\t\tcolors[tradeable_p[a]], TILENAME[tradeable_p[a]]\n\t\t\t\t)\n\t\thold_p += '\\n'\n\t\tif money_p != 0:\n\t\t\thold_p += f'${money_p}\\n'\n\t\tif goojf_p == 1:\n\t\t\thold_p += '1 get out of jail free card.\\n'\n\t\telif goojf_p != 0:\n\t\t\thold_p += f'{goojf_p} get out of jail free cards.\\n'\n\t\tfor a in range(len(tradeable_partner)):\n\t\t\tif to_trade_partner[a]:\n\t\t\t\thold_partner += '{:10} {}\\n'.format(\n\t\t\t\t\tcolors[tradeable_partner[a]], TILENAME[tradeable_partner[a]]\n\t\t\t\t)\n\t\thold_partner += '\\n'\n\t\tif money_partner != 0:\n\t\t\thold_partner += f'${money_partner}\\n'\n\t\tif goojf_partner == 1:\n\t\t\thold_partner += '1 get out of jail free card.\\n'\n\t\telif goojf_partner != 0:\n\t\t\thold_partner += f'{goojf_partner} get out of jail free cards.\\n'\n\t\tif not hold_p.strip():\n\t\t\thold_p = 'Nothing :('\n\t\tif not hold_partner.strip():\n\t\t\thold_partner = 'Nothing :('\n\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\tawait self.ctx.send(\n\t\t\tf'You will give:\\n```\\n{hold_p}```\\nYou will get:\\n```\\n{hold_partner}```\\n'\n\t\t\t'`a`: Accept\\n`c`: Cancel'\n\t\t)\n\t\tchoice = await self.bot.wait_for(\n\t\t\t'message',\n\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\tcheck=lambda m: (\n\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\tand m.content.lower() in ('a', 
'c')\n\t\t\t)\n\t\t)\n\t\tchoice = choice.content.lower()\n\t\tif choice == 'c':\n\t\t\treturn\n\t\tdoMention = await self.cog.config.guild(self.ctx.guild).doMention()\n\t\tmember_p = await self.get_member(self.uid[self.p])\n\t\tmember_partner = await self.get_member(self.uid[partner])\n\t\tif doMention:\n\t\t\tmention = member_partner.mention\n\t\telse:\n\t\t\tmention = member_partner.display_name\n\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\tawait self.ctx.send(\n\t\t\tf'{mention}, {member_p.display_name} would like to trade with you. '\n\t\t\tf'Here is their offer.\\n\\nYou will give:\\n```\\n{hold_partner}```\\n'\n\t\t\tf'You will get:\\n```\\n{hold_p}```\\nDo you accept (y/n)?'\n\t\t)\n\t\tchoice = await self.bot.wait_for(\n\t\t\t'message',\n\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\tcheck=lambda m: (\n\t\t\t\tm.author.id == self.uid[partner]\n\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\tand m.content.lower() in ('y', 'yes', 'n', 'no')\n\t\t\t)\n\t\t)\n\t\tchoice = choice.content[0].lower()\n\t\tif choice == 'n':\n\t\t\treturn\n\t\tself.bal[self.p] += money_partner\n\t\tself.bal[partner] += money_p\n\t\tself.bal[self.p] -= money_p\n\t\tself.bal[partner] -= money_partner\n\t\tself.goojf[self.p] += goojf_partner\n\t\tself.goojf[partner] += goojf_p\n\t\tself.goojf[self.p] -= goojf_p\n\t\tself.goojf[partner] -= goojf_partner\n\t\tfor a in range(len(tradeable_p)):\n\t\t\tif to_trade_p[a]:\n\t\t\t\tself.ownedby[tradeable_p[a]] = partner\n\t\tfor a in range(len(tradeable_partner)):\n\t\t\tif to_trade_partner[a]:\n\t\t\t\tself.ownedby[tradeable_partner[a]] = self.p",
"def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)",
"def fund_transaction(address, role):\n amount = 0\n message_text = ''\n error_text = ''\n if encoding.is_valid_address(address):\n if check_optin(address):\n if role == 'player':\n amount = 1500\n message_text = 'Your account has been funded with 1,500 Monopoly Money'\n\n elif role == 'banker':\n amount = 20000\n message_text = 'Your account has been funded with 20,000 Monopoly Money'\n\n asset_transfer(SENDER_ADDRESS, SENDER_PRIVATE_KEY, address, amount, ASSET_ID)\n else:\n error_text = \"Your account not opt-in to Monopoly Money asset\"\n else:\n error_text = \"Enter correct Algorand address\"\n return message_text, error_text",
"def create_tx(coin, account, recipient, amount):\n if coin ==ETH:\n gasEstimate = w3.eth.estimateGas(\n {\"from\": account.address, \"to\": recipient, \"value\": amount})\n return{\n \"to\": recipient,\n \"from\": account.address,\n \"value\": amount,\n \"gasPrice\": w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": w3.eth.getTransactionCount(account.address)\n }\n if coin == BTCTEST:\n return PrivateKeyTestnet.prepare_transaction(account.address, [(recipient, amount, BTC)])",
"def transfer_money(request):\n source = Account.objects.get(pk=int(request.POST.get('source-id', False)))\n destination = Account.objects.get(pk=int(request.POST.get('destination-id', False)))\n amount = float(request.POST.get('amount', False))\n enough_cash = source.available_cash >= amount\n if enough_cash:\n source.available_cash -= amount\n source.save()\n destination.available_cash += amount\n destination.save()\n messages.success(request, 'OK 200: Transfer successfully executed.')\n else:\n messages.error(request, f'Error 400: Tried to transfer {amount} from {source.name}, but only had {source.available_cash} available.')\n \n transaction = Transaction(description=f\"Transfer from {source.name} to {destination.name}.\", success=enough_cash, cash_amount=amount, source_account=source, \n destination_account=destination)\n transaction.save()\n\n return redirect('overview')",
"def test_transaction_update_receive(self):\n currency_endowment = {\"FET\": 75}\n good_endowment = {\"good_id\": 30}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n assert self.ownership_state.amount_by_currency_id == currency_endowment\n assert self.ownership_state.quantities_by_good_id == good_endowment\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": 20},\n tx_sender_fee=5,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": -10},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n self.ownership_state._update(tx_message=tx_message)\n expected_amount_by_currency_id = {\"FET\": 90}\n expected_quantities_by_good_id = {\"good_id\": 20}\n assert (\n self.ownership_state.amount_by_currency_id == expected_amount_by_currency_id\n )\n assert (\n self.ownership_state.quantities_by_good_id == expected_quantities_by_good_id\n )",
"def create_transaction(inputs: list, outputs: dict) -> ((str, int), str):\n\ttry:\n\t\tc = Bitcoin(testnet=bitcoin_is_testnet)\n\t\touts = []\n\t\tfor outk, outv in outputs.items():\n\t\t\touts += [{'value': outv, 'address': outk}]\n\t\ttx = c.mktx(inputs, outs)\n\t\ttx_serialize = serialize(tx)\n\n\t\t# Signing each input to predict the transaction size\n\t\tpriv = sha256('a big long brainwallet password')\n\t\ttx_signed = tx.copy()\n\t\tfor i in range(len(inputs)):\n\t\t\ttx_signed = c.sign(tx_signed, i, priv)\n\n\t\t# The serialization uses one char per nibble so in order the get the number of bytes it's necessary to\n\t\t# divide the size of the string serialization by 2\n\t\treturn (str(tx_serialize), len(str(serialize(tx_signed))) // 2), None\n\texcept Exception as e:\n\t\t# It should be logging using the default log\n\t\tprint(f\"There was a problem trying to create the transaction: {e}\")\n\t\treturn (None, None), \"There was a problem trying to create the transaction\"",
"def send_tx(coin, account, recipient, amount):\n if coin =='eth':\n txn = create_tx(coin, account, recipient, amount)\n signed_txn = w3.eth.account.signTransaction(txn)\n result = w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n print(result.hex())\n return result.hex()\n\n else:\n tx_btctest= create_tx(coin, account, recipient, amount)\n sign_tx_btctest = account.sign_transaction(tx_btctest)\n from bit.network import NetworkAPI\n NetworkAPI.broadcast_tx_testnet(sign_tx_btctest) \n return sign_tx_btctest",
"def createrawtransaction(\n self,\n outpoints: List[Dict[str, Any]],\n send_to: Dict[str, float],\n locktime: Optional[int] = None,\n ) -> str:\n assert type(outpoints) == list\n assert type(send_to) == dict\n assert locktime is None or type(locktime) == int\n return self.rpc_call(\"createrawtransaction\", outpoints, send_to, locktime)",
"def make_transaction():\n account_id = request.json['account_id']\n aux_account = [account for account in accounts if account['id'] == account_id]\n if len(aux_account) == 0:\n abort(404)\n account_balance = Decimal(aux_account[0].get('balance')).quantize(Decimal('0.00'))\n transaction = request.json['transaction']\n transaction_amount = Decimal(abs(request.json['amount'])).quantize(Decimal('0.00'))\n\n if not request.json:\n abort(400)\n if transaction not in ['withdrawal', 'deposit']:\n abort(400, f'Invalid transaction name: {transaction}')\n if transaction == 'withdrawal':\n transaction_amount = transaction_amount*-1\n\n # the user can't withdraw more than the account has\n validation_sum = (account_balance + transaction_amount).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n if validation_sum >= 0:\n for real_account in accounts:\n if real_account.get('id') == account_id:\n real_account['balance'] = round(float(validation_sum),2)\n else:\n abort(400, {'error':'Not enough funds for this transaction'})\n\n return json.dumps({f'{transaction.capitalize()} Done. New balance': str(validation_sum)}, ensure_ascii=False), 200",
"def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)",
"def op_transfer(cls, op, tx_idx, num, date):\n result = cls._validated(op, tx_idx, num, date)\n if not result:\n return\n\n record, author_id, permlink = result\n\n # add payment record and return post id\n sql = \\\n\"\"\"\nINSERT INTO hive_payments(block_num, tx_idx, post_id, from_account, to_account, amount, token) SELECT\n bn, tx, hp.id, fa, ta, am, tkn\nFROM\n( \n SELECT bn, tx, hpd.id, auth_id, fa, ta, am, tkn\n FROM (VALUES (:_block_num, :_tx_idx, :_permlink, :_author_id , :_from_account , :_to_account , :_amount, :_token)) \n AS v(bn, tx, perm, auth_id, fa, ta, am, tkn) \n JOIN hive_permlink_data hpd\n ON v.perm = hpd.permlink\n) as vv(bn, tx, hpd_id, auth_id, fa, ta, am, tkn )\nJOIN hive_posts hp\nON hp.author_id=vv.auth_id AND hp.permlink_id=vv.hpd_id\nRETURNING post_id\n\"\"\"\n\n post_id = DB.query_one(sql, \n _block_num=record['block_num'], \n _tx_idx=record['tx_idx'], \n _permlink=permlink, \n _author_id=author_id,\n _from_account=record['from_account'],\n _to_account=record['to_account'],\n _amount=record['amount'],\n _token=record['token']\n )\n\n amount = record['amount']\n if not isinstance(amount, float):\n amount = float(amount)\n\n if amount != 0.0 and post_id is not None:\n # update post record\n sql = \"UPDATE hive_posts SET promoted = promoted + :val WHERE id = :id\"\n DB.query(sql, val=amount, id=post_id)",
"def send_tx(args):\n kwargs = {\n '--privkey': args.privkey,\n '--to': AMEND_ADDR,\n '--code': args.code,\n '--value': str(args.value),\n }\n args = functools.reduce(\n lambda lst, kv: lst + list(kv),\n kwargs.items(),\n [],\n )\n print(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'make_tx.py', *args, '--no-newcrypto'])\n subprocess.call(['python3', 'send_tx.py'])\n with open('../output/transaction/hash') as fobj:\n return fobj.read().strip()",
"def test_transaction_update(self):\n currency_endowment = {\"FET\": 100}\n good_endowment = {\"good_id\": 20}\n\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n assert self.ownership_state.amount_by_currency_id == currency_endowment\n assert self.ownership_state.quantities_by_good_id == good_endowment\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=5,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n self.ownership_state._update(tx_message=tx_message)\n expected_amount_by_currency_id = {\"FET\": 75}\n expected_quantities_by_good_id = {\"good_id\": 30}\n assert (\n self.ownership_state.amount_by_currency_id == expected_amount_by_currency_id\n )\n assert (\n self.ownership_state.quantities_by_good_id == expected_quantities_by_good_id\n )",
"def send_payment(source_wallet, destination_wallet, amount, description):\n # in case the sign is wrong get absolute value\n amount = abs(amount)\n\n source_entry = dict(amount=-amount, wallet=source_wallet)\n destination_entry = dict(amount=amount, wallet=destination_wallet)\n\n if destination_wallet.currency != source_wallet.currency:\n from_rate = (\n find_exchange_rates(dict(to_currency=source_wallet.currency)).first().rate\n )\n to_rate = (\n find_exchange_rates(dict(to_currency=destination_wallet.currency))\n .first()\n .rate\n )\n destination_entry[\"amount\"] = (\n amount * calculate_currency_rate(base_rate=from_rate, target_rate=to_rate)\n ).quantize(Decimal(\"1.00\"))\n\n transaction_instance = create_transaction(\n dict(description=description), entries=[source_entry, destination_entry]\n )\n return transaction_instance",
"def make_money_and_account(self, ):\n mid = self.model.create_money('RU', 'Russian rubles')\n aid = self.model.create_account('test', mid, 10000)\n return (mid, aid)",
"def __create_transaction(self):\n log.debug(\"Displaying __create_transaction\")\n # Make the admin select an user\n user = self.__user_select()\n # Allow the cancellation of the operation\n if isinstance(user, CancelSignal):\n return\n # Create an inline keyboard with a single cancel button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"),\n callback_data=\"cmd_cancel\")]])\n # Request from the user the amount of money to be credited manually\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_credit\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(-? ?[0-9]{1,3}(?:[.,][0-9]{1,2})?)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Convert the reply to a price object\n price = self.Price(reply)\n # Ask the user for notes\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_transaction_notes\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(.*)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Create a new transaction\n transaction = db.Transaction(user=user,\n value=int(price),\n provider=\"Manual\",\n notes=reply)\n self.session.add(transaction)\n # Change the user credit\n user.recalculate_credit()\n # Commit the changes\n self.session.commit()\n # Notify the user of the credit/debit\n self.bot.send_message(user.user_id,\n self.loc.get(\"notification_transaction_created\",\n transaction=transaction.text(w=self)))\n # Notify the admin of the success\n self.bot.send_message(self.chat.id, self.loc.get(\"success_transaction_created\",\n transaction=transaction.text(w=self)))"
] |
[
"0.61815363",
"0.59610337",
"0.5821514",
"0.57208735",
"0.5666299",
"0.5660158",
"0.5631508",
"0.5585961",
"0.5577441",
"0.5575177",
"0.55738866",
"0.55622536",
"0.55600274",
"0.55556196",
"0.5540459",
"0.5522409",
"0.54789543",
"0.5451003",
"0.5426924",
"0.54183215",
"0.5410041",
"0.53895485",
"0.5385642",
"0.5364176",
"0.5349267",
"0.53410155",
"0.5337107",
"0.5335376",
"0.53203684",
"0.5312171"
] |
0.6110522
|
1
|
Return the next chance card.
|
def get_next_chance_card(self):
return self.chance_cards.pop(0)
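
A minimal usage sketch (added for clarity, not from the original source): the ChanceDeck name and card texts are assumptions, but the draw/return mechanics mirror the snippet above, where the deck is a list and pop(0) takes from the front.

    class ChanceDeck:
        def __init__(self, cards):
            self.chance_cards = list(cards)

        def get_next_chance_card(self):
            return self.chance_cards.pop(0)      # draw from the front of the pile

        def receive_chance_card(self, card):
            self.chance_cards.append(card)       # returned cards go to the back

    deck = ChanceDeck(["Advance to Go", "Go to Jail", "Bank pays you $50"])
    card = deck.get_next_chance_card()           # "Advance to Go" comes off the front
    deck.receive_chance_card(card)               # the drawn card rejoins the pile at the end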
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_card(self):\n\n card = random.randint(1,13)\n return card",
"def next_play(self):\n\t\tfor card in self.hand:\n\t\t\tif is_valid(card):\n\t\t\t\tself.play_card(card)\n\t\t\t\treturn card\n\t\tglobal forced_rank\n\t\tif forced_rank == \"2\":\n\t\t\tglobal two_multiplier\n\t\t\tself.draw(two_multiplier)\n\t\t\tprint(f\"{self.name} draws {str(two_multiplier)} cards.\")\n\t\t\ttwo_multiplier = 0\n\t\t\tforced_rank = False\n\t\t\treturn None\n\t\tcard = self.draw(1)[0]\n\t\tprint(self.name + \" draws a card.\")\n\t\tif is_valid(card):\n\t\t\tself.play_card(card)\n\t\t\treturn card\n\t\tprint(self.name + \" passes the turn.\")",
"def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card",
"def play_card(self, rnd: PlayerRound) -> int:\n # play random\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # select a random card\n return np.random.choice(np.flatnonzero(valid_cards))",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card",
"def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n player = random.choice(cards)\n return player",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card",
"def get_random_card() -> \"Card\":\n last = Card.objects.count() - 1\n index = random.randint(0, last)\n return Card.objects.all()[index]",
"def card_output():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n return random.choice(cards)",
"def random_card(computer_hand):\n \n if len(computer_hand) != 1:\n random_index = random.randint(0,len(computer_hand)-1)\n else:\n random_index = 0\n card_to_play = computer_hand[random_index]\n print('computer hand: ', computer_hand)\n print('computer plays ', card_to_play)\n return computer_hand[random_index]",
"def play_card(self, rnd: PlayerRound) -> int:\n # we can check if we are playing the correct game\n assert rnd.jass_type == JASS_HEARTS\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # lets divide our cards into heart and other cards\n my_heart_cards = valid_cards * color_masks[HEARTS, :]\n my_other_cards = valid_cards - my_heart_cards\n\n if rnd.nr_cards_in_trick == 0:\n # we are the first player, so we can select what to play\n # lets select some random non-heart card if we have any (not that this is necessarily\n # a good strategy :-)\n if my_other_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_other_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n else:\n # if we have to give a card, lets try to give a heart card\n if my_heart_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_heart_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n\n self._logger.debug('Played card: {}'.format(card_strings[card]))\n return card",
"def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r",
"def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'",
"def next(self):\r\n rnd = rand() * self.totals[(-1)]\r\n return bisect.bisect_right(self.totals, rnd)",
"def _random_card(self) -> Cards.Card:\n\t\tif len(self.drawstack) < 1:\n\t\t\tfor col in list(Cards.CardColour):\n\t\t\t\t# Numbers 1-9 + Skip, Reverse, Draw2\n\t\t\t\tfor typ in list(Cards.CardType)[1:13]:\n\t\t\t\t\tself.drawstack.append(Cards.Card(typ, col))\n\t\t\t\t\tself.drawstack.append(Cards.Card(typ, col))\n\t\t\t\tself.drawstack.append(Cards.Card(Cards.CardType.NUMBER0, col))\n\t\t\tfor _ in range(4):\n\t\t\t\tself.drawstack.append(Cards.Card(Cards.CardType.WILD))\n\t\t\t\tself.drawstack.append(Cards.Card(Cards.CardType.WILD_DRAW))\n\t\t\trandom.shuffle(self.drawstack)\n\n\t\treturn self.drawstack.pop()",
"def getNextCard(deckId):\n deckOfCards = getCardsForDeck(deckId)\n card = deckOfCards.order_by('?')[0]\n return card",
"def nextMoveDecision(self):\n b = random.randint(1, 9) \n while (self.Occupied(b)):\n b = random.randint(1, 9) \n return b",
"def receive_chance_card(self, card):\r\n self.chance_cards.append(card)",
"def choose_first():\n rand = random.randint(1, 2)\n print(f\"The first is Player-{rand}\")\n return rand",
"def round_next(cls):\n if cls.is_finished():\n logging.error('Grand challenge finished.')\n return None\n\n round = cls.get_current_round()\n cls.force_round_close(round)\n\n challenges = []\n if cls.is_final():\n # Only two players left in the game\n arb_win = cls.eligible(0)\n arb_lose = cls.eligible(1)\n challenges.append(GrandChallenge.create(arb_win[0], arb_lose[0], round.round_number + 1))\n else:\n # More than two players, create new challenges\n if round.round_number % 2 == 1:\n challenges += cls.play_round(1, round.round_number + 1)\n challenges += cls.play_round(0, round.round_number + 1)\n else:\n challenges += cls.play_round(1, round.round_number + 1)\n\n if challenges:\n # Update round number\n round.round_number += 1\n cls.set_current_round(round.round_number)\n logging.debug('Played round %s' % round.round_number)\n return round",
"def chance(dice):\n return sum(dice)",
"def best_hand(cards):\n return max(generate_all_hands(cards))",
"def deal_one(self):\r\n rand_card = random.choice(self.cards_list)\r\n self.cards_list.remove(rand_card)\r\n return rand_card",
"def one_in_four():\n chance = random.randrange(0, 4)\n return chance",
"def next_move(self, board, dice):\r\n rulebook = RuleBook(board, self, dice)\r\n legal_plies = rulebook.generate_legal_ply_list()\r\n return random.choice(legal_plies)",
"def get_next(self):\n if self.has_next():\n return self.pile.pop(0)\n else:\n return Card.null_card()",
"def pick(self, amount = 1):\n pick_card = self._starting_card[-1:-amount -1 :-1]\n self._starting_card = self._starting_card[0:-amount:1] \n return pick_card",
"def get_next_community_card(self):\r\n return self.community_cards.pop(0)",
"def randomperc(self):\n\n self.jrand += 1\n if (self.jrand >= 55):\n self.jrand = 1\n self.oldrand = advance_random(self.oldrand)\n return self.oldrand[self.jrand]"
] |
[
"0.73839265",
"0.7233632",
"0.6910416",
"0.6814642",
"0.6752508",
"0.67323685",
"0.6697044",
"0.6683003",
"0.6669536",
"0.65967244",
"0.6572759",
"0.64932215",
"0.64538753",
"0.63115644",
"0.6229882",
"0.62091255",
"0.6207595",
"0.6194546",
"0.61408436",
"0.61378855",
"0.61348593",
"0.61221486",
"0.61207575",
"0.6100377",
"0.6077723",
"0.6068667",
"0.60684",
"0.6015678",
"0.5997485",
"0.59803295"
] |
0.8169335
|
0
|
Receive a chance card.
|
def receive_chance_card(self, card):
self.chance_cards.append(card)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def cards_per_hand(ctx):\n message = NNB.cards_per_hand()\n await ctx.send(message)",
"def hit(player):\n deal_random_card(player)",
"def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card",
"def get_next_chance_card(self):\r\n return self.chance_cards.pop(0)",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card",
"def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n player = random.choice(cards)\n return player",
"def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card",
"def play_card(self, rnd: PlayerRound) -> int:\n # play random\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # select a random card\n return np.random.choice(np.flatnonzero(valid_cards))",
"def get_card(self):\n\n card = random.randint(1,13)\n return card",
"def hit(self, player):\n\n hit_card = self.deck.draw()\n hit_card.flip()\n player.take_card(hit_card)\n\n if self.verbose:\n print(player, 'receives', hit_card)",
"def random_card(computer_hand):\n \n if len(computer_hand) != 1:\n random_index = random.randint(0,len(computer_hand)-1)\n else:\n random_index = 0\n card_to_play = computer_hand[random_index]\n print('computer hand: ', computer_hand)\n print('computer plays ', card_to_play)\n return computer_hand[random_index]",
"def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()",
"def deal_cards(self):\n aux = random.randint(0, len(self.deck))\n card = self.deck[aux]\n self.deck.pop(aux)\n print(f\"Received: {card}\")\n return card",
"def play(self):\n # log.debug(\"{0} is playing...\".format(self.label))\n legal_cards = []\n for c in self.hand:\n if self.is_legal_play(c):\n legal_cards.append(c)\n chosen_card_pos = random.randint(0, len(legal_cards)-1)\n # log.debug(str(legal_cards))\n chosen_card = legal_cards[chosen_card_pos]\n self.send_play(chosen_card)",
"def play_card(self, rnd: PlayerRound) -> int:\n # we can check if we are playing the correct game\n assert rnd.jass_type == JASS_HEARTS\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # lets divide our cards into heart and other cards\n my_heart_cards = valid_cards * color_masks[HEARTS, :]\n my_other_cards = valid_cards - my_heart_cards\n\n if rnd.nr_cards_in_trick == 0:\n # we are the first player, so we can select what to play\n # lets select some random non-heart card if we have any (not that this is necessarily\n # a good strategy :-)\n if my_other_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_other_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n else:\n # if we have to give a card, lets try to give a heart card\n if my_heart_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_heart_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n\n self._logger.debug('Played card: {}'.format(card_strings[card]))\n return card",
"def choose_card(self, state=None):\n # if self.at_last_stich():\n # allowed = yield self.cards[0]\n # else:\n self.observation_received.acquire()\n self.observation = self.build_observation(state, self.cards)\n logger.debug(f\"choose_card received observation: {self.observation}\")\n self.observation_received.notify_all() # notify all threads to be sure\n self.observation_received.release()\n\n self.action_received.acquire()\n received = self.action_received.wait()\n if not received:\n logger.debug(\"Timeout occurred. action_received condition has not been notified.\")\n logger.debug(f\"choose_card received action: {self.action}\")\n allowed_cards = self.allowed_cards(state=state)\n chosen_card = allowed_cards[0] # set chosen_card to the first allowed card in case anything goes south\n chosen_card = self.set_chosen_card(allowed_cards, chosen_card)\n self.action_received.release()\n\n allowed = yield chosen_card\n\n if allowed:\n yield None",
"def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r",
"def assess_hand(self, r):\n bidIndex = -1\n while bool(random.getrandbits(1)): # Coin flip\n bidIndex += 1\n if bidIndex == -1:\n self.maxBid = LEGAL_BIDS[0] - 1 # Pass immediately.\n else:\n self.maxBid = LEGAL_BIDS[bidIndex]",
"def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")",
"def deal_random_card(player):\n return player.append(random.choice(deck))",
"def take_comp_turn(self, deck, pile):\n matches = [card for card in self.hand if card.is_match(pile.top_card() != 0)]\n if len(matches) > 0: # can play\n choice = random.randrange(len(matches))\n self.play_card(matches[choice-1], pile)\n if matches[choice - 1].kind == 'wild' or matches[choice - 1].kind == 'wild4':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n matches[choice - 1].color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n print(str(self.name) + \" played \" + str(matches[choice-1]))\n\n else: # comp can't play\n # check if deck is empty -- if so, reset it\n if deck.is_empty():\n deck.reset_deck(pile)\n # draw a new card from the deck\n newcard = self.draw_card(deck)\n print(\"The computer drew: \" + str(newcard))\n if newcard.is_match(pile.top_card()): # can be played\n self.play_card(newcard, pile)\n if newcard.kind == 'wild':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n newcard.color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n else: # still can't play\n print(\"Sorry, you still can't play.\")\n print(str(self.name) + \" played \" + str(newcard))\n return",
"async def roulette(self, ctx):\n choices = [\"This is the end of the world\", \"And I don't know what to put here\"]\n await ctx.send(random.choice(choices))",
"def waitForCard(self):\n try:\n cardService = self.cardrequest.waitforcard()\n self.timer.stop()\n\n cardService.connection.connect()\n\n self.cardUid = self.getUID(cardService)\n self.ATR = self.getATR(cardService)\n\n account = self.action.getAccount(self.cardUid)\n \n if account is None:\n self.warning.emit(WARN_NO_ACCOUNT)\n else:\n if account['statement'] == STA_USER_ACTIVE:\n for device in account['devices']:\n if device['uid'] == self.cardUid:\n if device['status'] == STA_DEVICE_ACTIVE:\n # all is ok\n self.cardDetected.emit(account['balance'])\n elif device['status'] == STA_DEVICE_LOST:\n self.warning.emit(WARN_DEVICE_LOST)\n elif device['status'] == STA_DEVICE_STOLEN:\n self.warning.emit(WARN_DEVICE_STOLEN)\n elif device['status'] == STA_DEVICE_DELETED:\n self.warning.emit(WARN_DEVICE_DELETED)\n else:\n self.warning.emit(WARN_DEVICE_DELETED)\n break\n elif account['statement'] == STA_USER_INACTIVE:\n self.warning.emit(WARN_ACCOUNT_INACTIVE)\n elif account['statement'] == STA_USER_DELETED:\n self.warning.emit(WARN_ACCOUNT_DELETED)\n else:\n self.warning.emit(WARN_ACCOUNT_DELETED)\n\n except CardRequestTimeoutException:\n self.updateWaiting.emit()\n # init variables\n self.cardUid = None\n self.ATR = None",
"def eat(self, agent, card, ctype, eat_list):\r\n\t\tdcard = self.pong_or_eat(2, ctype, card)\r\n\t\t#if (dcard): print \"\\tGeniusAgent eat: {0}\".format(card)\r\n\t\t#else: print \"\\tGeniusAgent do not eat:{0}\".format(card)\r\n\t\treturn dcard",
"async def _99play(ctx, card):\n try:\n card = int(card)\n except Exception:\n await ctx.send(\"Sorry {}, you did not provide a reasonable card number.\".format(ctx.message.author.name))\n\n if card in NNB.hand(ctx.message.author.id):\n NNB.play(ctx.message.author.id, card)\n hand = NNB.hand(ctx.message.author.id)\n await ctx.send(\"{} played the card {}.\".format(ctx.message.author.name, card))\n await ctx.message.author.send(\"You played the card {}, and your hand is {}.\".format(card, hand))\n\n print(ctx.message.author.name, \"played the card\", card)\n\n # Say number of cards in each hand\n await cards_per_hand(ctx)\n\n # Say next player's turn\n await next_player_message(ctx)\n\n else:\n await ctx.send(\"Sorry {}, you do not have card {} in your hand.\".format(ctx.message.author.name, card))",
"def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])",
"def hit():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Hand\" is in play, hit the \"player\". \n if in_play:\n outcome = outcome_plus = outcome_plus_plus = \"\"\n player.add_card(deck_of_cards.deal_card())\n else:\n return None\n \n # If busted, update messages, score and the player's \n # \"Hand\" status.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL \n score -= SCORE_POINTS\n in_play = False\n \n return None",
"def receive_card(self, card: Card) -> None:\n\t\tself.deck.append(card)\n\t\t\n\t\t# Sorts the Deck by type and colour for aesthetic purposes\n\t\t\"\"\"self.deck.sort(key=lambda x: repr(x.type))\n\t\tself.deck.sort(key=lambda x: repr(x.colour))\"\"\"",
"def play_random_number (self):\n if (self.cur_game_secret == \"\"):\n print (\"ERROR: No current game, join a game.\")\n return 1\n\n play_value = self.hand.pop (random.randint (0, len (self.hand) - 1))\n r = requests.post (self.url_endpoint, data = {\"play_game\": True, \"game_name\": self.cur_game_name,\n \"player_secret\": self.secret, \"play_value\": play_value})\n # Check if play was accepted\n if (r.status_code != 201):\n return [1, r]\n else:\n return [0, r]"
] |
[
"0.67559594",
"0.6644758",
"0.65473336",
"0.6527448",
"0.6505729",
"0.6439306",
"0.6402647",
"0.638352",
"0.632512",
"0.6302483",
"0.63002384",
"0.6219421",
"0.6176694",
"0.61733097",
"0.6160437",
"0.60518074",
"0.6049033",
"0.60325867",
"0.60216695",
"0.5994193",
"0.5935299",
"0.59130466",
"0.5910721",
"0.5910569",
"0.5884172",
"0.58768684",
"0.58288527",
"0.5811195",
"0.5760732",
"0.5757891"
] |
0.76488245
|
0
|
Return the next community card.
|
def get_next_community_card(self):
return self.community_cards.pop(0)
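
One design note: list.pop(0) is O(n) because every remaining element shifts left. A sketch of the same behaviour with collections.deque, which pops from the left in O(1) (the class name is hypothetical, not taken from the original code):

    from collections import deque

    class CommunityPile:
        def __init__(self, cards):
            self.community_cards = deque(cards)

        def get_next_community_card(self):
            return self.community_cards.popleft()   # O(1) instead of pop(0)'s O(n)

        def receive_community_card(self, card):
            self.community_cards.append(card)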
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_next(self):\n if self.has_next():\n return self.pile.pop(0)\n else:\n return Card.null_card()",
"def getNextCard(deckId):\n deckOfCards = getCardsForDeck(deckId)\n card = deckOfCards.order_by('?')[0]\n return card",
"def get_next_chance_card(self):\r\n return self.chance_cards.pop(0)",
"def parse_community_cards(state):\n community_cards = list()\n community = state['game']['community']\n if community and 'card' in community:\n for card in community['card']:\n new_card = robopoker.entities.Card(card['@rank'], card['@suit'])\n community_cards.append(new_card)\n return community_cards",
"def next_play(self):\n\t\tfor card in self.hand:\n\t\t\tif is_valid(card):\n\t\t\t\tself.play_card(card)\n\t\t\t\treturn card\n\t\tglobal forced_rank\n\t\tif forced_rank == \"2\":\n\t\t\tglobal two_multiplier\n\t\t\tself.draw(two_multiplier)\n\t\t\tprint(f\"{self.name} draws {str(two_multiplier)} cards.\")\n\t\t\ttwo_multiplier = 0\n\t\t\tforced_rank = False\n\t\t\treturn None\n\t\tcard = self.draw(1)[0]\n\t\tprint(self.name + \" draws a card.\")\n\t\tif is_valid(card):\n\t\t\tself.play_card(card)\n\t\t\treturn card\n\t\tprint(self.name + \" passes the turn.\")",
"def _get_next_cheapest_desired_card(diff_state):\n # search for min within in Dict\n # may optimize later\n\n next_card = CardEnum.NoCard\n min_cost = 1e6 # Arbitrarily Large Number\n\n for key in list(diff_state.keys()):\n if diff_state[key] is not None:\n if diff_state[key] < min_cost:\n next_card = key\n min_cost = diff_state[key]\n\n return next_card",
"def receive_community_card(self, card):\r\n self.community_cards.append(card)",
"def get_n_card(self, n):\n if len(self.cards) > 0:\n return self.cards[-n]\n else:\n return None",
"def get_card(self):\n\n card = random.randint(1,13)\n return card",
"def next_player(self):\n # Counter is a useful class that counts objects.\n count = Counter(self.board)\n if count.get('X', 0) > count.get('O', 0):\n return 'O'\n return 'X'",
"def get_card_topurchase(self, state, available_cards):\n #get diff_state\n diff_state = self._create_diff_state(state)\n\n available = False\n while not available:\n #get Cheapest next desired Card\n next_get = self._get_next_cheapest_desired_card(diff_state)\n\n if next_get == CardEnum.NoCard:\n next_get = CardEnum.NoCard\n available = True\n continue\n\n diff_state[next_get] = None\n # removes next cheapest from possibles, prevents inf loop\n\n if next_get in available_cards:\n available = True\n\n return next_get",
"def get_next_player(self, player):\r\n return player * -1",
"def get_next_player(self, player):\r\n return player * -1",
"def getNext(self, player):",
"def get_next_player(current_player: Optional[str]) -> str:\n if current_player == c.X:\n return c.O\n else:\n return c.X",
"def next(self):\n return self._call_player_proxy('Next', None)",
"def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]",
"def __next__(self):\r\n \r\n if self.start != None:\r\n \r\n #store the suit cursor for iterating through 4 suits\r\n suit_cursor = self.start\r\n card_cursor = suit_cursor.element() #link to first node for suits\r\n \r\n templist = []\r\n \r\n if card_cursor._suit[0] == \"D\":\r\n \r\n cursor = self._headD\r\n \r\n #loop until last node is reached\r\n while cursor._next != None:\r\n \r\n #append card rank + suit \r\n templist.append((cursor.data,cursor._suit))\r\n \r\n #update card cursor \r\n cursor = cursor._next\r\n \r\n if card_cursor._suit[0] == \"C\":\r\n \r\n cursor = self._headC\r\n \r\n while cursor._next != None:\r\n \r\n templist.append((cursor.data,cursor._suit))\r\n \r\n cursor = cursor._next\r\n \r\n if card_cursor._suit[0] == \"H\":\r\n \r\n cursor = self._headH\r\n \r\n while cursor._next != None:\r\n \r\n templist.append((cursor.data,cursor._suit))\r\n \r\n cursor = cursor._next\r\n \r\n if card_cursor._suit[0] == \"S\":\r\n \r\n cursor = self._headS\r\n \r\n while cursor._next != None:\r\n \r\n templist.append((cursor.data,cursor._suit))\r\n \r\n cursor = cursor._next\r\n \r\n \r\n #update suit cursor to next suit\r\n self.start = self.after(suit_cursor)\r\n \r\n else:\r\n #stop iteration exception is raised if there are no more nodes to iterate in a list\r\n raise StopIteration()\r\n \r\n return templist",
"def next(self):\n return self.read_message()",
"def next_player(self):\n return next(self.next_tour)",
"def get_card(name_str, page=1):\r\n payload = {'name': name_str, 'page': page}\r\n response = query('https://api.magicthegathering.io/v1/cards', payload)\r\n return response.json()",
"def get_topmost_card(self):\n if len(self.cards) > 0:\n return self.cards[-1]\n else:\n return None",
"def get_card(self):\n return self.card",
"def get_next_keystream_value(deck_of_cards):\n get_big_joker_value(deck_of_cards)\n get_small_joker_value(deck_of_cards)\n move_small_joker(deck_of_cards)\n move_big_joker(deck_of_cards)\n triple_cut(deck_of_cards)\n insert_top_to_bottom(deck_of_cards)\n keystream_value = get_card_at_top_index(deck_of_cards)\n \n if keystream_value == get_big_joker_value(deck_of_cards) or \\\n keystream_value == get_small_joker_value(deck_of_cards):\n keystream_value = get_next_keystream_value(deck_of_cards)\n return keystream_value\n\t\n # Condition where if keystream_value is equal to big_joker_value or\n # small_joker_value then this will be repeated. After occuring it is then \n # checked again to see if keystream_value is equal to big_joker_value or\n # small_joker_value. If so, then again repeated until not so.",
"def nextPlayer(self):\n self.turn += 1\n if self.turn >= len(self.players):\n self.turn = 0\n return self.players[self.turn]",
"def next_player(self,board, prev_player):\r\n opp = self.opponent(prev_player)\r\n isOpp = self.any_legal_move(opp, board)\r\n isPrev = self.any_legal_move(prev_player, board)\r\n if(isOpp==False and isPrev==False):\r\n return None\r\n elif(isOpp == False and isPrev == True):\r\n return prev_player\r\n else:\r\n return opp",
"def get_card (self, card):\n\t\treturn self._card",
"def next_player(board, prev_player):\n opp = Othello.opponent(prev_player)\n if Othello.any_legal_move(opp, board):\n return opp\n elif Othello.any_legal_move(prev_player, board):\n return prev_player\n return None",
"def get_card(self, name):\n for card in self.cards:\n if card.name == name:\n return card\n\n return None",
"def __next__(self):\n if self.iterator_idx >= self.num_conversations:\n print('You reached the end of the conversations.')\n self.reset() # return the iterator idx to 0\n return None\n\n conv = self.conversations[self.iterator_idx]\n self.iterator_idx += 1\n\n return conv"
] |
[
"0.67116123",
"0.63600814",
"0.59270644",
"0.58697116",
"0.5854605",
"0.5853722",
"0.5849353",
"0.5799878",
"0.5773101",
"0.56748474",
"0.56654567",
"0.56156725",
"0.56156725",
"0.561448",
"0.555655",
"0.55324405",
"0.5530132",
"0.5528631",
"0.54927295",
"0.5477948",
"0.5452764",
"0.5451248",
"0.54197806",
"0.54108423",
"0.5408347",
"0.5404497",
"0.53726095",
"0.5372492",
"0.53681654",
"0.5356004"
] |
0.83956087
|
0
|
Renders the default text for the shell, so it can be rendered consistently across all the different commands.
|
def render_defaults(stdscr):
max_y = stdscr.getmaxyx()[0] - 1
if superglobals.information_enabled:
stdscr.addstr(0, 0, uname().system)
stdscr.addstr(1, 0, uname().machine)
for i in range(0, max_y + 1):
stdscr.addstr(i, 43, "│") # Barrier that protects program from user input.
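
A minimal way to exercise the function above (a sketch, not part of the original module); it assumes render_defaults, its uname import from platform, and the superglobals.information_enabled flag are available in scope, as they are in the original shell:

    import curses

    def _demo(stdscr):
        curses.curs_set(0)        # hide the cursor while drawing
        render_defaults(stdscr)   # system-info header plus the column-43 barrier
        stdscr.getkey()           # keep the screen up until a key is pressed

    if __name__ == "__main__":
        curses.wrapper(_demo)     # wrapper() restores the terminal state on exit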
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _default(self):\n self.app.args.print_help()",
"def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))",
"def main_menu_for_testing():\n print(PROMPT_TEXT)",
"def helptext(self):\n return \"\"",
"def display_main(self):\n self.clear_terminal()\n self.main_menu()\n self.handle_selection_main()",
"def initDefaultCommand(self):\n pass",
"def _get_prompt_text(self):\n return Blinking_Text(\n self,\n self.settings.font_light_filename,\n 48,\n self.settings.font_color,\n 'Press Enter',\n {'center': self.screen_rect.center},\n 0,\n 50,\n )",
"def default(self, line):\n self.stdout.write(\"Unknown command: '{}'\\n\".format(line))\n self._help()",
"def print_welcome(self):\r\n print(\"\"\"\r\n \\033[0;34mWelcome to my game Dave,\r\n I am putting myself to the fullest possible use,\r\n which is all I think that any conscious entity can ever hope to do.\r\n You are X's and I am O's.\\033[0m\r\n \"\"\")",
"def render(console: Console) -> None:\n console.print(Rule(\"[bold blue]CLI File Manager\", style=\"red\"))\n console.print(Panel(\"[white]Welcome to The [bold]BETTER[/bold] File manager\\nFor help type: `help` or `h`\",\n style=\"green\"))",
"def Run(self, text, alternate_screen=False):\n if alternate_screen:\n self.renderer.erase()\n self.coshell.Run(text)\n if alternate_screen:\n self.renderer.erase(leave_alternate_screen=False, erase_title=False)\n self.renderer.request_absolute_cursor_position()\n self._redraw()",
"def handle_default( environ ):\n # TODO: implement me\n return 200, [], _html.format(\n title = 'DEFAULT',\n head = '',\n body = 'DEFAULT'\n )",
"def set_text(self):\n pass",
"def shell():\n pass",
"def shell(state):\n IPython.embed(colors=\"Neutral\", banner1=BANNER, user_ns={\"sdk\": state.sdk})",
"def graphic_auto():\r\n print(\"\\nCe mode n'est pas supporté. Passez en affichage textuel pour le mode automatique\")",
"def render_prompt(self) -> str:\n # pylint: disable=no-member\n return '{}render: '.format(self.prompt)",
"def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text",
"def _default(self):\n\n self.app.render(infoNetwork.all())",
"def default(self, command):\n self._run_cmd(command)",
"def add_help(text):\n global default_text\n default_text += text",
"def CreateConsole(self):\n lc = launcher.TextFrame('title')\n return lc",
"def cb_cmd_text(data, item, window):\n return cmd_text",
"def default(self, line):\n print \"Command not found\\n\"",
"def display_eng(self):\n self.clear_terminal()\n self.menu_eng()\n self.handle_selection_eng()",
"def default():\n\treturn render_template(\"login.html\")",
"def title_p(self):\n self.run_command('title_p')",
"def help_display(self):\r\n cmdString = 'pyuic5 -h' \r\n # execute command and return stdout string\r\n output2 = subprocess.getoutput(cmdString) \r\n # show stdout \r\n self.plainTextEdit.insertPlainText( output2 )",
"def echo(self, text):\n tb = self._echobox\n tb.configure(state=Tix.NORMAL)\n tb.insert(Tix.END, text + \"\\n\")\n tb.configure(state=Tix.DISABLED)",
"def default(self, statement):\n arg = statement.full_parsed_statement()\n if self.default_to_shell:\n result = os.system(arg)\n # If os.system() succeeded, then don't print warning about unknown command\n if not result:\n return\n\n # Print out a message stating this is an unknown command\n self.poutput('*** Unknown syntax: {}\\n'.format(arg))"
] |
[
"0.61833084",
"0.61203045",
"0.6098549",
"0.60617983",
"0.5950336",
"0.59205765",
"0.5903588",
"0.58817387",
"0.584353",
"0.5783898",
"0.57385015",
"0.5654426",
"0.56391895",
"0.5628658",
"0.5602524",
"0.55427074",
"0.553693",
"0.55326563",
"0.55046827",
"0.5489929",
"0.5478958",
"0.547871",
"0.54738265",
"0.5472331",
"0.54685783",
"0.54658014",
"0.5447907",
"0.5433388",
"0.5426139",
"0.5410697"
] |
0.6152007
|
1
|
Returns a boolean indicating whether all the args, interpreted as integers, are possible indexes into the list.
|
def indexists(list, *args): # Technically doesn't have to do with the screen, but it is very useful.
return all([int(arg) < len(list) for arg in args])
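
A few illustrative calls (added for clarity, not from the original source), showing that the check only guards the upper bound, exactly as written above:

    items = ["a", "b", "c"]
    print(indexists(items, 0, 2))    # True  -- 0 and 2 are both valid indices
    print(indexists(items, "1"))     # True  -- digit strings are cast with int()
    print(indexists(items, 3))       # False -- 3 is one past the end of the list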
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _idxs_are_present(self, *args):\n return set(args).issubset(set(range(self.n_atoms)))",
"def has_args(iterable, args):\n\n try:\n return all(x in iterable for x in args)\n\n except TypeError:\n return False",
"def index_is_in_list(the_list, index):\n return bool(0 <= index < len(the_list))",
"def args_is_good(arg_list: list) -> bool:\n usage_msg = (\n \"Usage: python operations.py <number1> <number2>\\n\"\n \"Example:\\n\"\n \" python operations.py 10 3\\n\"\n )\n too_many_msg = \"InputError: too many arguments\\n\"\n only_numbers_msg = \"InputError: only numbers\\n\"\n if len(arg_list) == 1:\n print(usage_msg)\n return False\n if len(arg_list) > 3:\n print(too_many_msg, usage_msg)\n return False\n try:\n a, b = int(arg_list[1]), int(arg_list[2])\n # discarding floats here, even those like 5.0\n # use float.is_integer() if need to keep those\n # keeping only 42 or \"42\" (ints with or without quotes)\n if arg_list[1] == str(a) and arg_list[2] == str(b):\n return True\n except TypeError:\n print(only_numbers_msg, usage_msg)\n return False",
"def _check_valid_command_argument(valid_list, args):\n if args in valid_list:\n return 0\n else:\n return -1",
"def valid_index(self, index):\n if 0 <= index < self._list_size:\n return True\n else:\n return False",
"def valid_args(args):\n return args is not None and len(args) > 0",
"def string_is_index_list(inp: str):\n inp = inp.strip()\n return len((inp)) > 0 and all([x in [\" \", \":\", \"-\"] or x.isdigit() for x in inp])",
"def is_multi_commands(args: list) -> bool:\n for arg in args:\n if not isinstance(arg, list):\n return False\n # all elements must be lists\n return True",
"def _is_positive_int_tuple(item):\n if not isinstance(item, tuple):\n return False\n for i in item:\n if not _is_positive_int(i):\n return False\n return True",
"def validate_indices_per_bin(indices_per_bin: List[List[int]]) -> bool:\n # pylint: disable=too-many-nested-blocks\n is_valid = False\n\n flat_list = [] # type: List[int]\n if isinstance(indices_per_bin, list):\n if not indices_per_bin:\n raise ValueError('The indices_per_bin list cannot be empty.')\n for indices_bin in indices_per_bin:\n if isinstance(indices_bin, list):\n flat_list += indices_bin\n for index in indices_bin:\n if isinstance(index, int):\n if index < 0:\n raise ValueError('One of the indices is a '\n 'negative integer -- all should '\n 'be non-negative.')\n else:\n raise TypeError('Indices should be integers. *{}* is '\n 'not an integer.'.format(index))\n else:\n raise TypeError('One of the elements embedded in the '\n 'indices_per_bin list is not a list.')\n if len(flat_list) != len(set(flat_list)):\n raise ValueError('Some of the indices are duplicated.')\n else:\n raise TypeError('The indices_per_bin parameter has to be a list.')\n\n # Check whether the indices are consecutive numbers without any gaps\n indices_number = max(flat_list) + 1 # The indexing starts from 0\n all_indices = range(indices_number)\n missing_indices = set(all_indices).difference(flat_list)\n if missing_indices:\n warnings.warn(\n 'The following indices are missing (based on the top index): {}.\\n'\n 'It is possible that more indices are missing if they were the '\n 'last one(s).'.format(missing_indices), UserWarning)\n\n is_valid = True\n return is_valid",
"def check_args(args):\n for arg in vars(args):\n if getattr(args, arg):\n return True\n return False",
"def all(a: list[int], b: int) -> bool:\n i = 0\n if len(a) == 0:\n return False\n else:\n while i < len(a):\n if a[i] == b:\n i += 1\n else:\n return False\n return True",
"def is_valid_arg(self, muts, arg):\n for mut in muts:\n if arg in mut.args():\n return True\n\n return False",
"def _all_equal(arg):\n return arg.count(arg[0]) == len(arg)",
"def in_list(value, arg):\r\n return value in arg",
"def is_sequence_of_int(items):\n return all(isinstance(item, int) for item in items)",
"def is_int(*args): \n try:\n for i in args:\n int(i)\n return True\n except Exception:\n return False",
"def all(b: list[int], a: int) -> bool:\n i: int = 0\n while i < len(b):\n if b[i] == a:\n if i == len(b) - 1:\n return True\n i += 1\n else:\n return False\n return False",
"def omt_check(grade_list_idx, grade_list_i, grade_list_j):\n return grade_list_idx == (grade_list_i + grade_list_j)",
"def test_args_count_equal(args: list, target: int) -> bool:\n\n\treturn (args_count(args) == target)",
"def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean",
"def takes_multiple_arguments(func):\n if func in ONE_ARITY_BUILTINS:\n return False\n elif func in MULTI_ARITY_BUILTINS:\n return True\n\n try:\n spec = getargspec(func)\n except:\n return False\n\n try:\n is_constructor = spec.args[0] == 'self' and isinstance(func, type)\n except:\n is_constructor = False\n\n if spec.varargs:\n return True\n\n if spec.defaults is None:\n return len(spec.args) - is_constructor != 1\n return len(spec.args) - len(spec.defaults) - is_constructor > 1",
"def all(a: list[int], b: int) -> bool:\n i: int = 0\n count: int = 0\n if len(a) > 0:\n while i < len(a):\n if a[i] == b:\n i = i + 1\n count = count + 1\n else:\n i = i + 1\n return(count == (len(a)))\n else:\n return False",
"def is_valid_integer_list(any_list):\n list_object = json.loads(any_list)\n return not any(not is_valid_integer(str(listing_id)) for listing_id in\n list_object)",
"def test_args_count_in_range(args: list, min: int, max: int) -> bool:\n\n\tcount = args_count(args)\n\treturn (count >= min and count <= max)",
"def is_sequence_of_uint(items):\n return all(isinstance(item, int) and item >= 0 for item in items)",
"def all_are_integer_equivalent_numbers(argument):\n from abjad import mathtools\n try:\n return all(mathtools.is_integer_equivalent_number(_) for _ in argument)\n except TypeError:\n return False",
"def _check_index(idx):\n return isinstance(idx, _Int)",
"def isvect(x):\n return isinstance(x,list) and len(x) == 4 and isgoodnum(x[0]) and isgoodnum(x[1]) and isgoodnum(x[2]) and isgoodnum(x[3])"
] |
[
"0.755418",
"0.67725885",
"0.6696974",
"0.64974993",
"0.64970857",
"0.6482055",
"0.64819956",
"0.6474068",
"0.63763213",
"0.6312439",
"0.6281895",
"0.6268804",
"0.62539935",
"0.6230627",
"0.62300694",
"0.6202207",
"0.6108527",
"0.60967743",
"0.60871893",
"0.6086811",
"0.6055318",
"0.60476804",
"0.6010964",
"0.60076433",
"0.5995975",
"0.59756976",
"0.5967378",
"0.5958729",
"0.5956272",
"0.5949041"
] |
0.8298558
|
0
|
Checks if the given arguments can be turned into integers.
|
def is_int(*args):
try:
for i in args:
int(i)
return True
except Exception:
return False
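
A few illustrative calls (added for clarity, not from the original):

    print(is_int("3", 5, "-2"))   # True  -- every argument converts with int()
    print(is_int("3.5"))          # False -- int("3.5") raises ValueError
    print(is_int(4, "x"))         # False -- one bad argument rejects them all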
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_for_int(check):",
"def int_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Integral):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected integral number, {} got instead.'.format(func, name))",
"def all_are_integer_equivalent_numbers(argument):\n from abjad import mathtools\n try:\n return all(mathtools.is_integer_equivalent_number(_) for _ in argument)\n except TypeError:\n return False",
"def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True",
"def hasCorrectNumberArguments(self, *args):\n return _libsbml.ASTBasePlugin_hasCorrectNumberArguments(self, *args)",
"def is_convertible_to_int(v: Any) -> bool:\n\n try:\n test = int(v)\n return True\n except:\n return False",
"def is_sequence_of_int(items):\n return all(isinstance(item, int) for item in items)",
"def _is_integer(x):\n return (not isinstance(x, (bool, np.bool))) and \\\n isinstance(x, (numbers.Integral, int, np.int, np.long, long)) # no long type in python 3",
"def args_is_good(arg_list: list) -> bool:\n usage_msg = (\n \"Usage: python operations.py <number1> <number2>\\n\"\n \"Example:\\n\"\n \" python operations.py 10 3\\n\"\n )\n too_many_msg = \"InputError: too many arguments\\n\"\n only_numbers_msg = \"InputError: only numbers\\n\"\n if len(arg_list) == 1:\n print(usage_msg)\n return False\n if len(arg_list) > 3:\n print(too_many_msg, usage_msg)\n return False\n try:\n a, b = int(arg_list[1]), int(arg_list[2])\n # discarding floats here, even those like 5.0\n # use float.is_integer() if need to keep those\n # keeping only 42 or \"42\" (ints with or without quotes)\n if arg_list[1] == str(a) and arg_list[2] == str(b):\n return True\n except TypeError:\n print(only_numbers_msg, usage_msg)\n return False",
"def isInteger(self):",
"def isInteger(self):",
"def isInteger(data):\n\ttry:\n\t\tfrom types import LongType, IntType\n\t\tif type(data) == LongType or type(data) == IntType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(int(0)):\n\t\t\treturn True\n\treturn False",
"def isInteger(self):\n pass",
"def is_integer(value):\n try:\n return True, int(value)\n except (ValueError, TypeError):\n return False",
"def is_int(x):\n return int(x) == x",
"def _int_validator(arg):\n if arg is None or type(arg) != int:\n raise ValueError('Incorrect value: input should be an int')",
"def is_integer(x):\n try:\n int(x)\n return True\n except ValueError:\n return False",
"def _check_args(self):\n if not isinstance(self.digits, str):\n raise TypeError('digits must be of type string.')\n if isinstance(self.n_points, float):\n self.n_points = int(self.n_points)\n if not isinstance(self.n_points, int):\n raise TypeError('n_points must be of type integer.')\n if self.n_points < 0:\n raise ValueError('n_points must be positive.')",
"def is_int(n):\n try:\n int(n)\n return True\n except ValueError:\n return False",
"def test_badargs(self):\n self.assertRaises(TypeError, isint, [])\n self.assertRaises(TypeError, isint, {})\n self.assertRaises(TypeError, isint, None)\n return",
"def is_int(value):\n return isinstance(value, int)",
"def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()",
"def is_int(num):\n return int(num) == num",
"def _is_valid_target_int(self, target):\n if isinstance(target, (int, np.int, np.int8, np.int16, np.int32, np.int64)):\n return True\n else:\n return False",
"def could_be_int(val):\n if val == None:\n return False\n\n if isinstance(val, int):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n i = int(val)\n if not isinstance(i, int):\n raise ValueError\n else:\n return True\n except:\n return False\n\n # otherwise\n return False",
"def check_for_float_and_int(check):",
"def number_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Number):\n name = type(var).__name__\n raise DigitError(\n 'Function {} expected number, {} got instead.'.format(func, name))",
"def _is_int(test_val):\n try:\n int(test_val)\n return True\n except ValueError:\n return False",
"def is_int3(items):\n return len(items) == 3 and all(isinstance(item, int) for item in items)",
"def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True"
] |
[
"0.711587",
"0.7098558",
"0.70692873",
"0.69129986",
"0.68626827",
"0.6750262",
"0.6704833",
"0.67028326",
"0.6642334",
"0.65828174",
"0.65828174",
"0.65801835",
"0.65182704",
"0.6516078",
"0.6502916",
"0.6467116",
"0.6466402",
"0.6451798",
"0.64275944",
"0.64255023",
"0.63717496",
"0.6366728",
"0.6339329",
"0.6327542",
"0.63050306",
"0.6304005",
"0.6299826",
"0.6297296",
"0.62826896",
"0.62729347"
] |
0.8308678
|
0
|
Checks if the given arguments can be turned into floats.
|
def is_float(*args):
try:
for i in args:
float(i)
return True
except Exception:
return False
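
Companion calls for the float check (also added, not from the original), highlighting what it accepts beyond is_int:

    print(is_float("3.5", "-0.25"))   # True  -- decimal strings parse with float()
    print(is_float("1e3"))            # True  -- scientific notation is accepted too
    print(is_float("abc"))            # False -- float("abc") raises ValueError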
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_arguments(arguments):\n quit = False\n for argument, value in vars(arguments).items():\n try:\n float(value)\n except:\n print(\"{} must be numeric\".format(argument))\n quit = True\n if quit:\n exit(1)",
"def check_for_float(check):",
"def check_for_float_and_int(check):",
"def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True",
"def is_float(self, input):\n try:\n float(input)\n return True\n except ValueError:\n return False",
"def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False",
"def could_be_float(val):\n if val == None:\n return False\n\n if isinstance(val, float):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n f = float(val)\n if not isinstance(f, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))",
"def is_float3(items):\n return len(items) == 3 and all(isinstance(item, float) for item in items)",
"def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def is_sequence_of_float(items):\n return all(isinstance(item, float) for item in items)",
"def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False",
"def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def is_float(value):\n try:\n float(value)\n except ValueError:\n return False\n else:\n return True",
"def is_floatable(value):\n\n try:\n float(value)\n return True\n except:\n return False",
"def __test_float(self, bk):\n for arg in self.args['float']:\n print(\"\\nTesting:\", arg)\n ds = ArgoDataFetcher(backend=bk).float(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True",
"def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False",
"def isFloat(string):\n return (True)",
"def _restricted_float(val: float):\n try:\n val = float(val)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} not a floating-point literal\")\n\n if 0.0 < val > 1.0:\n raise argparse.ArgumentTypeError(f\"{val} not in range [0.0, 1.0]\")\n return val",
"def is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect",
"def is_float(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)",
"def is_valid_float(input_string):\n assert input_string is not None\n try:\n float(input_string)\n return True\n except ValueError:\n return False",
"def check_if_number(list):\n for item in list:\n try:\n float(item)\n except ValueError as e:\n print WrongTypePointError(item)\n sys.exit()",
"def isFloat(val):\n\tvalFloat = True\n\ttry:\n\t\ttVal = float(val)\n\texcept ValueError:\n\t\tvalFloat = False\n\ttVal = None\n\tr = (valFloat, tVal)\n\treturn r",
"def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)",
"def _is_positive_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item > 0",
"def is_float(word):\n try:\n float(word)\n return True\n except ValueError:\n return False"
] |
[
"0.77807665",
"0.7744407",
"0.7359381",
"0.69544315",
"0.6885165",
"0.6864641",
"0.6821068",
"0.67614245",
"0.67499506",
"0.6736192",
"0.6709657",
"0.6687189",
"0.66818863",
"0.66159344",
"0.6614823",
"0.6611244",
"0.6589316",
"0.65854704",
"0.6578222",
"0.6568799",
"0.6527092",
"0.64679873",
"0.6438442",
"0.63565946",
"0.6343812",
"0.6339862",
"0.6334897",
"0.6307977",
"0.62964696",
"0.62885994"
] |
0.8388929
|
0
|
Draws a box using box drawing characters.
|
def draw_box(stdscr, y, x, height, width, mode=0):
if mode == 0:
stdscr.addstr(y, x, "┌" + "─" * (width - 1) + "┐")
stdscr.addstr(y + height, x, "└" + "─" * (width - 1) + "┘")
for i in range(y + 1, y + height):
stdscr.addstr(i, x, "│")
stdscr.addstr(i, x + width, "│")
if mode == 1:
stdscr.addstr(y, x, "╭" + "─" * (width - 1) + "╮")
stdscr.addstr(y + height, x, "╰" + "─" * (width - 1) + "╯")
for i in range(y + 1, y + height):
stdscr.addstr(i, x, "│")
stdscr.addstr(i, x + width, "│")
if mode == 2:
stdscr.addstr(y, x, "╔" + "═" * (width - 1) + "╗")
stdscr.addstr(y + height, x, "╚" + "═" * (width - 1) + "╝")
for i in range(y + 1, y + height):
stdscr.addstr(i, x, "║")
stdscr.addstr(i, x + width, "║")
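
A minimal invocation sketch, assuming a curses session; the screen handle, coordinates and sizes below are illustrative and not taken from the record. Mode 2 selects the double-line box style defined above.

import curses

def _demo(stdscr):
    curses.curs_set(0)                                      # hide the cursor
    draw_box(stdscr, y=1, x=2, height=5, width=20, mode=2)  # double-line box
    stdscr.getch()                                          # wait for a key press

# curses.wrapper(_demo)  # uncomment to run in a real terminal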
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def draw_box(\n draw,\n box,\n img_width,\n img_height,\n text=\"\",\n color=(255, 255, 0),\n) -> None:\n\n line_width = 3\n font_height = 8\n y_min, x_min, y_max, x_max = box\n (left, right, top, bottom) = (\n x_min * img_width,\n x_max * img_width,\n y_min * img_height,\n y_max * img_height,\n )\n draw.line(\n [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],\n width=line_width,\n fill=color,\n )\n if text:\n draw.text(\n (left + line_width, abs(top - line_width - font_height)), text, fill=color\n )",
"def drawbox(length, width, xstart, ystart):\n # curses takes y,x not x,y\n # Make the top left corner\n mvaddch(ystart, xstart, ACS_ULCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n # Draw the top side\n for i in range(0, width - 1):\n mvaddch(ystart, xstart + 1 + i, ACS_HLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n #Make the top right corner\n mvaddch(ystart, xstart + width - 1, ACS_URCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n # Draw the left side\n for i in range(1, length):\n mvaddch(ystart + i, xstart, ACS_VLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n # Draw the right side\n for i in range(1, length):\n mvaddch(ystart + i, xstart + width - 1, ACS_VLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n # Make the bottom left corner\n mvaddch(ystart + length, xstart, ACS_LLCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n # # Draw the bottom side\n for i in range(0, width - 1):\n mvaddch(ystart + length, xstart + 1 + i, ACS_HLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n # # Make the bottom left corner\n mvaddch(ystart + length, xstart + width - 1, ACS_LRCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n refresh()",
"def draw(self):\n print(\"Drawing...\", end=' ')\n s = self.pixelsPerCell\n for h in range(self.height):\n for w in range(self.width):\n self.box[w][h] = self.canvas.create_rectangle(w*s, h*s, w*s+s, h*s+s,\n fill = \"gray\", outline = \"gray\")\n self.canvas.update()\n print(\"Done!\")",
"def draw_box_and_text(img, text, box, color):\n cv2.putText(\n img, text,\n (box[0], box[1]-5), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, color, 2)\n # cv2.rectangle(\n # img,\n # (box[1], box[0]),\n # (box[3], box[2]),\n # color, 2)\n cv2.rectangle(\n img,\n (box[0], box[1]),\n (box[2], box[3]),\n color, 2)",
"def draw_box(image, box, color, thickness=2):\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)",
"def _draw(self, frame, boxes, probs, landmarks, name):\n try:\n print('drawing')\n for box, prob, ld, id in zip(boxes, probs, landmarks, name):\n # Draw rectangle on frame\n\n cv2.putText(frame, id, (200, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n\n except:\n print('not draw box')\n pass\n\n return frame",
"def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color)",
"def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color, 3)",
"def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour",
"def draw_box(image, curr_box, label, draw_line=False):\n # y1, x1, y2, x2 = box\n # print(curr_box)\n # assert False\n x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n _, h, w = image.size()\n x1 = int(x1.item() * w)\n y1 = int(y1.item() * h)\n x2 = int(x2.item() * w)\n y2 = int(y2.item() * h)\n if draw_line:\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n image[:, y1:y1 + 3, x1:x2] = label/13.0\n image[:, y2:y2 + 3, x1:x2] = label/13.0\n image[:, y1:y2, x1:x1 + 3] = label/13.0\n image[:, y1:y2, x2:x2 + 3] = label/13.0\n else:\n image[:, y1:y1 + 3, x1:x2] = label/13.0\n image[:, y2:y2 + 3, x1:x2] = label/13.0\n image[:, y1:y2, x1:x1 + 3] = label/13.0\n image[:, y1:y2, x2:x2 + 3] = label/13.0\n return image",
"def box(message, *style, **kwds):\n if style and style[0] in ('flag', 'box', 'overline', 'underline', 'lined'):\n border = style[0]\n chars = style[1:]\n else:\n border = 'box'\n chars = style\n lines = message.split('\\n')\n width = max([len(re.sub('\\x1b\\[[\\d;]*\\w', '', l)) for l in lines])\n if not chars:\n top = bottom = '-'\n left = right = '|'\n elif len(chars) == 1:\n top = bottom = left = right = chars[0]\n elif len(chars) == 2:\n top = bottom = chars[0]\n left = right = chars[1]\n elif len(chars) == 4:\n top, bottom, left, right = chars\n else:\n raise ScriptionError('if box chars specified, must be a single item for use as all four, two items for use as top/bottom and left/right, or four items')\n # calculate rule now\n rule = '-' * width\n #\n padding = 0\n if border == 'box':\n padding = 1\n width += len(left) + len(right) + 2 * padding\n elif border == 'flag':\n padding = 1\n width += len(left) + 2 * padding\n # make sure right is not used\n right = ''\n else:\n # make sure left and right are not used\n left = right = ''\n #\n times, remainder = divmod(width, len(top))\n top_line = top * times\n if remainder:\n top_line += top[-remainder:]\n #\n times, remainder = divmod(width, len(bottom))\n bottom_line = bottom * times\n if remainder:\n bottom_line += bottom[-remainder:]\n #\n box = []\n padding = padding * ' '\n if border != 'underline':\n box.append(top_line)\n for line in lines:\n if line == '---':\n line = rule\n leading = ('%(left)s%(padding)s%(line)s' %\n {'left': left, 'padding': padding, 'line':line}\n )\n line = '%-*s%s' % (width-len(right), leading, right)\n box.append(line)\n if border != 'overline':\n box.append(bottom_line)\n return '\\n'.join(box)",
"def draw_box(\n canvas,\n layout,\n box_width=None,\n box_alpha=0,\n color_map=None,\n show_element_id=False,\n show_element_type=False,\n id_font_size=None,\n id_font_path=None,\n id_text_color=None,\n id_text_background_color=None,\n id_text_background_alpha=1,\n):\n\n assert 0 <= box_alpha <= 1, ValueError(\n f\"The box_alpha value {box_alpha} is not within range [0,1].\"\n )\n assert 0 <= id_text_background_alpha <= 1, ValueError(\n f\"The id_text_background_alpha value {id_text_background_alpha} is not within range [0,1].\"\n )\n\n draw = ImageDraw.Draw(canvas, mode=\"RGBA\")\n\n id_text_background_color = id_text_background_color or DEFAULT_TEXT_BACKGROUND\n id_text_color = id_text_color or DEFAULT_TEXT_COLOR\n\n if box_width is None:\n box_width = _calculate_default_box_width(canvas)\n\n if show_element_id or show_element_type:\n font_obj = _create_font_object(id_font_size, id_font_path)\n\n if color_map is None:\n all_types = set([b.type for b in layout if hasattr(b, \"type\")])\n color_map = _create_color_palette(all_types)\n\n for idx, ele in enumerate(layout):\n\n if isinstance(ele, Interval):\n ele = ele.put_on_canvas(canvas)\n\n outline_color = (\n DEFAULT_OUTLINE_COLOR\n if not isinstance(ele, TextBlock)\n else color_map.get(ele.type, DEFAULT_OUTLINE_COLOR)\n )\n\n _draw_box_outline_on_handler(draw, ele, outline_color, box_width)\n\n _draw_transparent_box_on_handler(draw, ele, outline_color, box_alpha)\n\n if show_element_id or show_element_type:\n text = \"\"\n if show_element_id:\n ele_id = ele.id or idx\n text += str(ele_id)\n if show_element_type:\n text = str(ele.type) if not text else text + \": \" + str(ele.type)\n\n start_x, start_y = ele.coordinates[:2]\n text_w, text_h = font_obj.getsize(text)\n\n text_box_object = Rectangle(\n start_x, start_y, start_x + text_w, start_y + text_h\n )\n # Add a small background for the text\n\n _draw_transparent_box_on_handler(\n draw,\n text_box_object,\n id_text_background_color,\n id_text_background_alpha,\n )\n\n # Draw the ids\n draw.text(\n (start_x, start_y),\n text,\n fill=id_text_color,\n font=font_obj,\n )\n\n return canvas",
"def box(self, x0, y0, width, height):\n assert width > 1\n assert height > 1\n\n width -= 1\n height -= 1\n\n for x in range(x0, x0 + width):\n self.point(x, y0, \"-\")\n self.point(x, y0 + height, \"-\")\n\n for y in range(y0, y0 + height):\n self.point(x0, y, \"|\")\n self.point(x0 + width, y, \"|\")\n\n self.point(x0, y0, \"+\")\n self.point(x0 + width, y0, \"+\")\n self.point(x0, y0 + height, \"+\")\n self.point(x0 + width, y0 + height, \"+\")",
"def _draw_single_box_on_image(self,box,label,id):\n p1 = (box[1], box[0])\n p2 = (box[3], box[2])\n if self.config.DISCO_MODE:\n color = random.choice(self.STANDARD_COLORS)\n else:\n color = self.STANDARD_COLORS[id]\n cv2.rectangle(self.image, p1, p2, color, 2)\n self._draw_text_on_image(label,(p1[0],p1[1]-10),color)",
"def boxTextAt( text = \"\", lboxchar = \" \", rboxchar = \" \", paddingchar = \" \", linewidth = 78 ):\n\n ansistring_text = stringExtends.ansiStringClass( \"\" )\n if isinstance( text, ( str, unicode ) ):\n ansistring_text.Text = text\n\n ansistring_lboxchar = stringExtends.ansiStringClass( default_display_vars.borderChar_Left )\n if isinstance( lboxchar, ( str, unicode ) ):\n ansistring_lboxchar.Text = lboxchar\n\n ansistring_rboxchar = stringExtends.ansiStringClass( default_display_vars.borderChar_Right )\n if isinstance( rboxchar, (str, unicode) ) :\n ansistring_rboxchar.Text = rboxchar\n\n ansistring_paddingchar = stringExtends.ansiStringClass( default_display_vars.boxText_padding )\n if isinstance( paddingchar, (str, unicode) ) :\n ansistring_paddingchar.Text = paddingchar\n\n line_width = 78\n if isinstance( linewidth, ( int, float ) ):\n line_width = linewidth\n\n r = stringExtends.ansiStringClass( '' )\n for line in ansistring_text.ansiTextWrap( line_width - ( ansistring_lboxchar.rawTextLen() + ansistring_rboxchar.rawTextLen() ) ):\n ansistring_line = stringExtends.ansiStringClass( line )\n\n pad_len = line_width - ( ansistring_lboxchar.rawTextLen() + ansistring_rboxchar.rawTextLen() + ansistring_line.rawTextLen() )\n\n this_pad_string = ( ansistring_paddingchar.ansiTextFormat() * int( math.floor( pad_len / ansistring_paddingchar.rawTextLen() ) ) )\n\n r.Text += ansistring_lboxchar.ansiTextFormat() + ansistring_line.ansiTextFormat() + this_pad_string\n if ( r.rawTextLen() + ansistring_rboxchar.ansiTextLen() ) < line_width:\n r.Text += ansistring_paddingchar.ansiSlice( 0, ( line_width - r.rawTextLen() ) - ansistring_rboxchar.ansiTextLen() )\n r.Text += ansistring_rboxchar.ansiTextFormat() + \"\\n\"\n\n r.Text = r.Text[:-1]\n return r.Text",
"def draw(self):\n # 5 is the number of characters per box add one for the header column\n sepreator_line = \"-\" * (len(self.letters) + 1) * 5 + \"-\"\n print(sepreator_line)\n print(\n \"| \" + \"\".join([f\"| {letter} \" for letter in self.letters]) + \"|\")\n print(sepreator_line)\n for number in self.numbers:\n print(f\"| {number} \" + \"\".join(\n [f\"| {self.positions[letter + number]} \" for letter in self.letters]) + \"|\")\n print(sepreator_line)",
"def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)",
"def draw_hand_box(data,box,c=[255,255,255]):\n crop = np.array(data['crop']);\n if box is not None:\n cv2.rectangle(crop, *box, c);\n return crop;",
"def box(self, box, padx=0.5, pady=0.3, **options):\n\n # underride sets default values only if the called hasn't\n underride(options, outline='black')\n box.left -= padx\n box.top -= pady\n box.right += padx\n box.bottom += pady\n item = self.rectangle(box, **options)\n return item",
"def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img",
"def drawRectangle(x, y, width, height):\n pen1.up()\n pen1.goto(x, y)\n pen1.down()\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)\n pen1.right(90)\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)",
"def _display_face(draw, bounding_box, name):\n top, right, bottom, left = bounding_box\n draw.rectangle(((left, top), (right, bottom)), outline=BOUNDING_BOX_COLOR)\n text_left, text_top, text_right, text_bottom = draw.textbbox(\n (left, bottom), name\n )\n draw.rectangle(\n ((text_left, text_top), (text_right, text_bottom)),\n fill=BOUNDING_BOX_COLOR,\n outline=BOUNDING_BOX_COLOR,\n )\n draw.text(\n (text_left, text_top),\n name,\n fill=TEXT_COLOR,\n )",
"def draw_square(display, coord, box_size, color, bg_color):\n left, top = coord\n half = int(box_size * 0.5)\n quarter = int(box_size * 0.25)\n pygame.draw.rect(\n display, color, (left + quarter, top + quarter, half, half))\n return",
"def draw(text: list):\n\n curses.wrapper(wrapper, text)",
"def draw_box(img, box):\n draw_img = img.copy()\n cv2.polylines(draw_img, np.int32([box]), True, (255, 0, 0), 4)\n show(draw_img)",
"def fill(self, x, y, width=None, height=None, char=None,\n fg=(255, 255, 255), bg=None):\n self.console.draw_rect(x, y, width, height, char, fg, bg)",
"def box(self, x, y, w, h):\n\t\tpass",
"def draw_boxes_texts(img,\n boxes,\n texts=None,\n colors=None,\n line_width=1,\n draw_start=False,\n box_format='x1y1x2y2'):\n assert box_format in ('x1y1x2y2', 'x1y1wh', 'xywh', 'xywha',\n 'polygon'), 'not supported box format!'\n img = imread(img)\n if len(boxes) == 0:\n return img\n boxes = copy.deepcopy(boxes)\n # convert bbox type to int\n if not isinstance(boxes, np.ndarray):\n if box_format != 'polygon':\n boxes = np.array(boxes)\n if box_format != 'xywha':\n boxes = boxes.astype(np.int)\n if len(boxes.shape) == 1:\n boxes = [boxes]\n else:\n boxes = [list(map(int, box)) for box in boxes]\n else:\n boxes = boxes.astype(np.int)\n if texts is not None and not isinstance(texts, (list, np.ndarray)):\n texts = [texts]\n if isinstance(img, Image.Image):\n img = cv.cvtColor(np.asarray(img), cv.COLOR_RGB2BGR)\n if not isinstance(img, np.ndarray):\n return\n if colors == 'random':\n colors = np.random.randint(0, 255, size=(len(boxes), 3))\n colors = [tuple(map(int, color)) for color in colors]\n text_color = (0, 255, 255)\n thickness = line_width\n font = cv.FONT_HERSHEY_SIMPLEX\n for idx, box in enumerate(boxes):\n # default color: red, BGR order\n box_color = (0, 0, 255) if colors is None else colors[idx]\n if box_format == 'x1y1x2y2':\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'x1y1wh':\n box[0:4] = cvtools.x1y1wh_to_x1y1x2y2(list(box[0:4]))\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'xywh':\n box[0:4] = cvtools.xywh_to_x1y1x2y2(list(box[0:4]))\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'xywha':\n rrect = tuple(box[:2]), tuple(box[2:4]), box[4]\n box = cv.boxPoints(rrect).astype(np.int)\n # box = np.int0(box)\n cv.drawContours(img, [box], 0, box_color, thickness)\n box = box.reshape((-1,))\n elif box_format == 'polygon':\n # for i in np.arange(2, len(box), 2):\n # cv.line(img, tuple(box[i-2:i]),\n # tuple(box[i:i+2]), box_color, thickness)\n # cv.line(img, tuple(box[-2:]),\n # tuple(box[:2]), box_color, thickness)\n # 如果img内存非连续,cv的所有绘制都会失效\n cv.polylines(img, np.int32([np.array(box).reshape(-1, 2)]), 1, box_color, thickness)\n # cv.line(img, tuple(box[:2]), tuple(box[2:4]), box_color, thickness)\n # cv.line(img, tuple(box[2:4]), tuple(box[4:6]), box_color, thickness)\n # cv.line(img, tuple(box[4:6]), tuple(box[6:8]), box_color, thickness)\n # cv.line(img, tuple(box[6:]), tuple(box[:2]), box_color, thickness)\n if draw_start:\n cv.circle(img, tuple(box[:2]),\n radius=5, color=text_color, thickness=-1)\n if texts is not None:\n cv.putText(img, texts[idx],\n (box[0]+2, box[1]-2), font, 0.5, text_color, 1)\n return img",
"def rectangle(height,width):\n for row in range(height):\n for column in range(width):\n print(CHAR, end = '')\n print()",
"def draw_box(self, xy=None, bbox=None, flatratio=1.0, **options):\n options = self._check_options(options)\n args = []\n \n if options[\"outlinecolor\"]:\n pen = aggdraw.Pen(options[\"outlinecolor\"], options[\"outlinewidth\"])\n args.append(pen)\n if options[\"fillcolor\"]:\n brush = aggdraw.Brush(options[\"fillcolor\"])\n args.append(brush)\n \n if xy:\n x,y = xy\n width = options[\"fillwidth\"]\n height = options[\"fillheight\"]\n if flatratio: height *= flatratio\n width, height = width / self.width * self.coordspace_width, \\\n height / self.height * self.coordspace_height\n halfwidth, halfheight = width / 2.0, height / 2.0\n bbox = [x-halfwidth, y-halfheight, x+halfwidth, y+halfheight]\n \n elif bbox: pass\n \n else: raise Exception(\"Either xy or bbox has to be specified\")\n \n self.drawer.rectangle(bbox, *args)"
] |
[
"0.7524085",
"0.7420148",
"0.6947447",
"0.6904583",
"0.6866881",
"0.67783445",
"0.67416203",
"0.6716438",
"0.66725934",
"0.6665916",
"0.65296215",
"0.64508295",
"0.64451134",
"0.6430952",
"0.64158237",
"0.6409835",
"0.6386739",
"0.6345775",
"0.6328558",
"0.6314567",
"0.62996423",
"0.62806875",
"0.62804085",
"0.6244331",
"0.6239906",
"0.62391216",
"0.6199206",
"0.6197713",
"0.61723024",
"0.6168583"
] |
0.7759127
|
0
|
Function to parse features from a GeoDataFrame into the format that rasterio expects
|
def getFeatures(gdf):
import json
return [json.loads(gdf.to_json())['features'][0]['geometry']]
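
A hedged end-to-end sketch of how the returned geometry list is typically passed to rasterio's mask utility; the file names are placeholders, and geopandas/rasterio are assumed to be installed.

import geopandas as gpd
import rasterio
from rasterio.mask import mask

gdf = gpd.read_file("area_of_interest.geojson")     # placeholder vector file
coords = getFeatures(gdf)                           # one GeoJSON geometry dict in a list
with rasterio.open("scene.tif") as src:             # placeholder raster
    clipped, out_transform = mask(src, shapes=coords, crop=True)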
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getFeatures(gdf):\n\timport json\n\treturn [json.loads(gdf.to_json())['features'][0]['geometry']]",
"def getFeatures(gdf):\r\n import json\r\n return [json.loads(gdf.to_json())['features'][0]['geometry']]",
"def getFeatures(gdf):\r\n import json\r\n features = [json.loads(gdf.to_json())['features'][0]['geometry']]\r\n return features",
"def make_features(self, gdb, fc_name, fields, rows):\n point_geotoken = \"SHAPE@XY\"\n other_geotoken = \"SHAPE@\"\n now = GeoJSONUtils.timestamp()\n stats = []\n\n if len(rows[0]) > 0:\n point_rows = rows[0]\n point_fc_name = fc_name + now + '_p'\n point_fc = GeoJSONUtils.make_fc(self, gdb, point_fc_name, fields, point_rows, \"POINT\", point_geotoken)\n stats.append(point_fc)\n if len(rows[1]) > 0:\n line_rows = rows[1]\n line_fc_name = fc_name + now + '_l'\n line_fc = GeoJSONUtils.make_fc(self, gdb, line_fc_name, fields, line_rows, \"POLYLINE\", other_geotoken)\n stats.append(line_fc)\n if len(rows[2]) > 0:\n poly_rows = rows[2]\n poly_fc_name = fc_name + now + '_a'\n poly_fc = GeoJSONUtils.make_fc(self, gdb, poly_fc_name, fields, poly_rows, \"POLYGON\", other_geotoken)\n stats.append(poly_fc)\n return stats",
"def extractFeatures(self, datum):\n abstract",
"def preprocess_feature(df):",
"def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] = v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features",
"def text_feature_extract(df):\n return df",
"def extract_raster_features(\n gdf, raster_path, pixel_values=None, nodata=255, n_jobs=-1, collapse_values=False\n):\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n with rio.open(raster_path) as src:\n\n raster_crs = src.crs.data\n gdf = gdf.to_crs(raster_crs)\n geomask = [gdf.unary_union.__geo_interface__]\n\n out_image, out_transform = mask(\n src, geomask, nodata=nodata, crop=True\n ) # clip to AoI using a vector layer\n\n if pixel_values:\n if collapse_values:\n out_image = np.where(\n np.isin(out_image, pixel_values), pixel_values[0], out_image\n ) # replace values to generate fewer polys\n pixel_values = np.isin(\n out_image, pixel_values\n ) # only include requested pixels\n\n shapes = list(\n features.shapes(out_image, mask=pixel_values, transform=out_transform)\n ) # convert regions to polygons\n res = list(zip(*shapes))\n geoms = pd.Series(res[0], name=\"geometry\").astype(str)\n pieces = _chunk_dfs(geoms, n_jobs)\n geoms = pd.concat(\n Parallel(n_jobs=n_jobs)(delayed(_apply_parser)(i) for i in pieces)\n )\n geoms = gpd.GeoSeries(geoms).buffer(0) # we sometimes get self-intersecting rings\n vals = pd.Series(res[1], name=\"value\")\n gdf = gpd.GeoDataFrame(vals, geometry=geoms, crs=raster_crs)\n if collapse_values:\n gdf = gdf.drop(columns=[\"value\"]) # values col is misleading in this case\n\n return gdf",
"def extractFeatures(image, mask, name, binCount=8, features=\"all\"):\n def extractType(func, type_name):\n name = []\n values = []\n feat = func(image,mask, binCount=binCount)\n feat.enableAllFeatures() \n feat.execute()\n for (key,val) in six.iteritems(feat.featureValues):\n name.append(key+f'_{type_name}')\n values.append(val)\n return pd.DataFrame([values], columns=name)\n\n dim = image.GetDimension()\n\n features_array = np.array([\"FO\", f\"S{dim}D\", \"GLCM\", \"GLSZM\", \"GLRLM\", \"NGTDM\", \"GLDM\"])\n features_func = np.array([firstorder.RadiomicsFirstOrder, eval(f\"shape{'2D'*(dim == 2)}.RadiomicsShape{'2D'*(dim==2)}\"), \n glcm.RadiomicsGLCM, glszm.RadiomicsGLSZM, glrlm.RadiomicsGLRLM, ngtdm.RadiomicsNGTDM, \n gldm.RadiomicsGLDM])\n if features != \"all\":\n if features is str:\n print(\"Type wrong. Returning None.\")\n return None\n index = pd.Index(features_array).isin(features)\n features_array = features_array[index]\n features_func = features_func[index]\n\n list_feat = list(map(lambda i: extractType(features_func[i], features_array[i]), np.arange(len(features_array))))\n df = pd.concat([pd.DataFrame([name], columns=[\"Caso\"])] + list_feat, axis=1)\n return df",
"def _read_geojson_features(data, features=None, prefix=\"\"):\n if features is None:\n features = collections.OrderedDict()\n for i, feature in enumerate(data['features']):\n key = feature.get('id', prefix + str(i))\n feature_type = feature['geometry']['type']\n if feature_type == 'FeatureCollection':\n _read_geojson_features(feature, features, prefix + '.' + key)\n elif feature_type == 'Point':\n value = Circle._convert_point(feature)\n elif feature_type in ['Polygon', 'MultiPolygon']:\n value = Region(feature)\n else:\n # TODO Support all http://geojson.org/geojson-spec.html#geometry-objects\n value = None\n features[key] = value\n return features",
"def get_features_from_segment_raw(seg_raw_df, feature_func_dict):\n # parse input\n if type(feature_func_dict) == str: # it's a json filename\n import json\n feature_func_str = open(feature_func_dict).read()\n feature_func_dict = json.loads(feature_func_str)\n print \"===========start computing features=================\"\n print \"===========feature function dictionary==============\"\n print feature_func_dict\n grouped = seg_raw_df.groupby(s_info.segment_col)\n # parse feature function dictionary\n result = {}\n for feature_name in feature_func_dict:\n print \"==========compute \" + feature_name + \"================\"\n feature = feature_func_dict[feature_name]\n if len(feature['paras']) == 0: # no parameter need to be set, easiest case\n # find out the function\n func_name = feature['handler']\n if hasattr(np, func_name):\n func = getattr(np, func_name)\n elif hasattr(sp_stats, func_name):\n func = getattr(sp_stats, func_name)\n elif hasattr(s_feature, func_name):\n func = getattr(s_feature, func_name)\n else:\n func = func_name\n # prepare columns\n temp = grouped[feature['apply']].aggregate(func)\n result[feature_name] = temp\n else: # has parameters, will compute column one by one\n paras = feature['paras']\n print paras\n # find out the function\n func_name = feature['handler']\n if hasattr(s_feature, func_name):\n func = getattr(s_feature, func_name)\n elif hasattr(np, func_name):\n func = getattr(np, func_name)\n else:\n print func_name + \" can't be found, ignore this feature\"\n continue\n # iterate over columns\n temp = {}\n c = 0\n for col in feature['apply']:\n if paras.has_key('with'): # need another column\n paras['another'] = grouped[paras['with'][c]].copy(True)\n temp[col] = grouped[col].aggregate(func, paras)\n c += 1\n # construct DataFrame\n result[feature_name] = pd.DataFrame(temp)\n print \"Inf values: %s\" % np.any(np.isinf(result[feature_name]))\n print \"NaN values: %s\" % np.any(np.isnan(result[feature_name]))\n feature_raw_df = pd.concat(result, axis=1)\n # feature_raw_df = feature_raw_df.reset_index(drop=True)\n return feature_raw_df",
"def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n \"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : \"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : 
\"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs",
"def parse_feature(self, feature_key, lines):\n ...",
"def parse_features(self, skip=...):\n ...",
"def parse_features(self, skip=...):\n ...",
"def get_features(feature: Any) -> List[Union[str, Sequence]]:\n feature_coords = get_coords_from_features(feature)\n feature_geometry = get_geometry_type(feature)\n\n if isinstance(feature_geometry, (list, tuple)):\n feature_geometry = feature_geometry[0]\n\n return [feature_geometry, feature_coords]",
"def parse_df(data):\n\n # First column should be the ids\n ids = list(data.iloc[:, 0])\n\n # Second column should hold the labels\n labels = list(data.iloc[:, 1])\n\n # From third columns, we should have the features\n features = list(data.iloc[:, 2:].values)\n\n return ids, labels, features",
"def geojson_zillow_fc(rows):\n feature_collection = []\n for row in rows:\n if row[2] == \"Boston\":\n f = Feature(geometry=Polygon(\\\n [parse_zillow_coordinates(row[6])]),\n id = row[4],\n properties={\"state\": str(row[0]),\n \"county\": str(row[1]),\n \"city\": str(row[2]),\n \"neighborhood\": \\\n str(row[3]),\n \"regionid\": str(row[4]),\n \"total_potins\": \\\n str(row[5]),\n \"mean_interior_score\": \\\n row[7],\n \"sd_interior_score\":\\\n row[8],\n \"max_int_score\":\\\n row[9],\n \"min_int_score\":\\\n row[10],\n \"region_property_count\":\\\n row[11],\n \"projectedfor2018\":\\\n row[17],\n })\n feature_collection.append(f)\n\n else:\n print(\"City: {}\".format(row[2]))\n\n fc = FeatureCollection(feature_collection)\n return fc",
"def vectorize(df):\n\tt = calc_affine(df)\n\ta = df.values\n\t# zeros an nan are left open space, means mask = True!\n\tmaske = (df != 0).fillna(True)\n\tgdf = gpd.GeoDataFrame()\n\tgeoms = []\n\tvalue = []\n\tfor s,v in rasterio.features.shapes(a,transform=t,mask=maske.values):\n\t\tgeoms.append(shape(s))\n\t\tvalue.append(v)\n\tgdf['geometry'] = geoms\n\tgdf = gdf.set_geometry('geometry')\n\tgdf['val']=value\n\treturn gdf",
"def geo_dataframe_to_list(data_frame, polygon=False):\n to_return = []\n for line in data_frame.geometry:\n lines = []\n if polygon:\n for values in line.exterior.coords:\n lines.append(values)\n else:\n for values in line.coords:\n lines.append(values)\n to_return.append(lines)\n return to_return",
"def _geofilter(frame):\r\n try:\r\n import geopandas as gpd\r\n\r\n # Remove rows with no latitude and longitude\r\n try:\r\n\r\n filresults = frame[(frame['ActionGeo_Lat'].notnull()\r\n ) | (frame['ActionGeo_Long'].notnull()\r\n )]\r\n except:\r\n\r\n filresults = frame[(frame['actiongeolat'].notnull()\r\n ) | (frame['actiongeolong'].notnull()\r\n )]\r\n gdf = gpd.GeoDataFrame(filresults.assign(geometry=_parallelize_dataframe(filresults)),\r\n crs={'init': 'epsg:4326'})\r\n gdf.columns = list(map(lambda x: (x.replace('_', \"\")).lower(), gdf.columns))\r\n\r\n final = gdf[gdf.geometry.notnull()]\r\n\r\n return final\r\n\r\n\r\n except BaseException as err: # pragma: no cover\r\n if str(err) == \"import of 'geopandas' halted; None in sys.modules\":\r\n raise ImportError(\"geopandas is not installed. gdeltPyR needs\"\r\n \" geopandas to export as shapefile. Visit http://\"\r\n \"geopandas.org/install.html for instructions.\")\r\n else:\r\n raise ValueError(\"geopandas is installed but the geospatial \"\r\n \"functions failed for some other reason. Review\"\r\n \" the stack trace to see where the failure \"\r\n \"occurred.\")",
"def process_dataframe(df):\n\n if isinstance(df, pd.DataFrame):\n df2 = df.copy()\n required_columns = {'name', 'wkt', 'lower_limit', 'upper_limit'}\n if not required_columns <= set(df2.columns):\n raise ValueError(\"DataFrame must contain columns 'name', 'wkt', 'lower_limit', 'upper_limit'.\")\n if not 'geometry' in list(df2.columns):\n logger.info(\"Converting WKT representation of geometry to geometry objects.\")\n df2['geometry'] = df2.wkt.apply(shapely.wkt.loads)\n gdf = geopandas.GeoDataFrame(df2, geometry=df2.geometry)\n elif isinstance(df, geopandas.GeoDataFrame):\n df2 = df.copy()\n required_columns = {'name', 'lower_limit', 'upper_limit'}\n if not required_columns <= set(df2.columns):\n raise ValueError(\"GeoDataFrame must contain columns 'name', 'lower_limit', 'upper_limit'.\")\n if not 'wkt' in list(df2.columns):\n logger.info(\"Converting geometry objects to their WKT representations.\")\n df2['wkt'] = df2.geometry.apply(lambda g: g.wkt)\n gdf = df2\n else:\n raise ValueError(\"df must be a DataFrame or GeoDataFrame!\")\n\n return gdf",
"def extract_features(raw_data):\n width = len(raw_data[0])\n num_features = len(raw_data) * width\n features = np.zeros((num_features, 3), dtype=bool)\n for row, line in enumerate(raw_data):\n for col, char in enumerate(line):\n if char == ' ':\n features[col + row * width][0] = True\n elif char == '+':\n features[col + row * width][1] = True\n elif char == '#':\n features[col + row * width][2] = True\n return features",
"def vectorize(self, connectivity=8):\n data = self._obj.values\n data_isnan = True if self.nodata is None else np.isnan(self.nodata)\n mask = ~np.isnan(data) if data_isnan else data != self.nodata\n feats_gen = features.shapes(\n data,\n mask=mask,\n transform=self.transform,\n connectivity=connectivity,\n )\n feats = [\n {\"geometry\": geom, \"properties\": {\"value\": idx}}\n for geom, idx in list(feats_gen)\n ]\n if len(feats) == 0: # return empty GeoDataFrame\n return gpd.GeoDataFrame()\n gdf = gpd.GeoDataFrame.from_features(feats, crs=self.crs)\n gdf.index = gdf.index.astype(self._obj.dtype)\n return gdf",
"def getFeatureColumnData(featurefile):\n featurecoldata = pd.read_csv(featurefile, sep=\"\\t\", header=None).values\n features = []\n for i in range(0, len(featurecoldata)):\n features.extend(range(featurecoldata[i,0], featurecoldata[i,1]))\n return features",
"def _convert_to_features(self, img: np.ndarray) -> np.ndarray:",
"def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset",
"def _get_features_geo(self, id):\n #creates featues/geo tensors for all atoms in protein\n if self.type_feature == \"hot_simple\":\n features = self.hot_enc(id)\n elif self.type_feature == \"mass_charges\":\n features = self.mass_charges(id)\n elif self.type_feature == \"bio_properties\":\n features = self.bio_prop(id)\n elif self.type_feature == \"bio_all_properties\":\n features_1 = self.mass_charges(id)\n features_2 = self.bio_prop(id)\n features = np.concatenate((features_1, features_2), axis=1)\n geometry = self._get_geometry_protein(id)\n return features, geometry",
"def convert_full_features_to_input_features(raw_features):\n data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))\n data_features = data_features.transform(lambda *example: (\n example[0], # example_id\n example[7], # inputs_id\n example[9], # segment_ids\n example[2], # valid_length,\n example[8], # p_mask\n example[10], # start_position,\n example[11], # end_position\n example[14])) # is_impossible\n return data_features"
] |
[
"0.70071805",
"0.70068884",
"0.69907653",
"0.6657737",
"0.6583149",
"0.6436121",
"0.63375276",
"0.62475395",
"0.6246845",
"0.6211411",
"0.61899287",
"0.6185225",
"0.60740775",
"0.60602796",
"0.6055743",
"0.6055743",
"0.60478085",
"0.60417706",
"0.6026995",
"0.6003534",
"0.59155536",
"0.58944905",
"0.5892515",
"0.5885935",
"0.58474654",
"0.5844267",
"0.58417326",
"0.5837681",
"0.58354455",
"0.580529"
] |
0.70252883
|
1
|
Gets the task_name of this LoanApplicationTasks.
|
def task_name(self) -> str:
return self._task_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getTaskName(self):\n return self._taskName",
"def task_name(self):\n pass",
"def task(self) -> str:\n return self._task",
"def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')",
"def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)",
"def gettaskname(self): # 3\n sizetaskname_ = (1 + self.gettasknamelen())\n arr_taskname = array.array(\"b\",[0]*((sizetaskname_)))\n memview_arr_taskname = memoryview(arr_taskname)\n res,resargs = self.__obj.gettaskname(sizetaskname_,memview_arr_taskname)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_taskname = resargs\n retarg_taskname = arr_taskname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_taskname",
"def task_label(self) -> str:\n label = str(self.request.id) if self.request.id else self.name\n label += '_%d' % self.request.retries if self.request.retries >= 1 else ''\n return label",
"def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def __str__(self):\n return self.task",
"def get_target_simple(self):\n task = self.task.get_task(self.task_id)\n return str(task['name'])",
"def getTaskTitle(self) -> unicode:\n ...",
"def getTaskTitle(self) -> unicode:\n ...",
"def TaskDisplayName(cls, task):\n if not task: return None\n return '//' + cls.TaskRelativeName(task)",
"def task_file(self) -> str:\n return self._task_file",
"def task_id(self):\n return self._mpis.task_id",
"def get_current_task_name(self):\r\n task_available = False\r\n while not task_available:\r\n try:\r\n with open('current_taskname.txt', 'r') as f:\r\n self.task_name = f.read()\r\n task_available = True\r\n except:\r\n print('No available task yet...')\r\n time.sleep(1)\r\n pass\r\n return self.task_name",
"def gettaskname(self):\n sizetaskname_ = (1 + self.gettasknamelen())\n taskname_ = (ctypes.c_char * (sizetaskname_))()\n res = __library__.MSK_XX_gettaskname(self.__nativep,sizetaskname_,taskname_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _taskname_retval = taskname_.value.decode(\"utf-8\",errors=\"replace\")\n return (_taskname_retval)",
"def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)",
"def get_task_uuid(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskUuid', self.handle)",
"def gen_task_name(app, name, module_name):\n ...",
"def filename(self):\n return TaskInfo._filename(self.id)",
"def state_name(self):\n return TASK_STATE.get(self.state, 'UNKNOWN')",
"def name(self):\n try:\n return self._name\n except AttributeError:\n if self.is_task:\n try:\n return self.pos_str\n except:\n return os.path.basename(self.workdir)\n else:\n return os.path.basename(self.workdir)",
"def task_status(self) -> str:\n return self._task_status",
"def state_name(self):\n return task_states.to_str(self.state)",
"def TaskNormalizedName(cls, task):\n abs_path = FileUtils.GetAbsPathForFile(task)\n if abs_path: return abs_path\n return task"
] |
[
"0.8486359",
"0.7913793",
"0.75000155",
"0.7168119",
"0.70443463",
"0.69992584",
"0.68941015",
"0.6861723",
"0.68553126",
"0.68553126",
"0.68553126",
"0.68553126",
"0.6802803",
"0.67627627",
"0.67244226",
"0.67244226",
"0.6719401",
"0.6687001",
"0.6637115",
"0.66048795",
"0.65795404",
"0.6555251",
"0.65432847",
"0.65239054",
"0.65215844",
"0.6438161",
"0.6330709",
"0.63179415",
"0.62975866",
"0.6294217"
] |
0.8500848
|
0
|
Sets the task_name of this LoanApplicationTasks.
|
def task_name(self, task_name: str):
if task_name is None:
raise ValueError("Invalid value for `task_name`, must not be `None`") # noqa: E501
self._task_name = task_name
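
In the generated model class this setter is normally paired with a matching @property getter; a sketch of that surrounding context is shown below (the class body and constructor are assumptions, only the validation logic comes from the record).

class LoanApplicationTasks:                         # assumed surrounding class
    def __init__(self, task_name: str = None):
        self._task_name = None
        if task_name is not None:
            self.task_name = task_name              # goes through the validating setter

    @property
    def task_name(self) -> str:
        return self._task_name

    @task_name.setter
    def task_name(self, task_name: str):
        if task_name is None:
            raise ValueError("Invalid value for `task_name`, must not be `None`")
        self._task_name = task_name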
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def task_name(self, task_name):\n\n self._task_name = task_name",
"def _setTaskName(self, taskName):\n self._taskName = taskName\n self._progressChangedNotifier.notify(self)",
"def task_name(self) -> str:\n return self._task_name",
"def puttaskname(self,taskname_): # 3\n res = self.__obj.puttaskname(taskname_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def task_name(self):\n pass",
"def task(self, **task):\n task[\"name\"] = task[\"name\"].replace(\"=\", \"--\")\n return task",
"def getTaskName(self):\n return self._taskName",
"def puttaskname(self,taskname_):\n if isinstance(taskname_,unicode):\n taskname_ = taskname_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_puttaskname(self.__nativep,taskname_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def setNextTask(self, taskname):\n self._NextTask = taskname\n return",
"def task_id(self, task_id):\n self._task_id = task_id",
"def task_id(self, task_id):\n\n self._task_id = task_id",
"def task(self, name):\n pass",
"def task_type(self, task_type):\n\n self._task_type = task_type",
"def gen_task_name(app, name, module_name):\n ...",
"def set_thread_name(self, thread_name: str):\n self.thread_name = thread_name",
"def edit_task_name(entry):\n entry.task_name = get_task_name()\n entry.save()\n input(\"Edit successful. \")\n return entry",
"def _set_task(self, task_idx):\n self.task_idx = task_idx",
"def project_task_id(self, project_task_id):\n\n self._project_task_id = project_task_id",
"def __str__(self):\n return self.task",
"def gettaskname(self): # 3\n sizetaskname_ = (1 + self.gettasknamelen())\n arr_taskname = array.array(\"b\",[0]*((sizetaskname_)))\n memview_arr_taskname = memoryview(arr_taskname)\n res,resargs = self.__obj.gettaskname(sizetaskname_,memview_arr_taskname)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_taskname = resargs\n retarg_taskname = arr_taskname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_taskname",
"def task(self) -> str:\n return self._task",
"def set_task1(self, task_id):\n self._set_task(self.controller.CURRENT, task_id)",
"def setTask(self, value):\n return self._call_java(\"setTask\", value)",
"def set_task(self, task):\n if self.properties['Task'] == '':\n self.properties['Task'] = task.quest\n self.properties['Last Edit'] = int(self._map.now().strftime(\"%j\"))\n if self.properties['Category'] == 'Shadow':\n self.properties['Old_Category'] = task.reward_type\n self.properties['Old_Icon'] = task.icon\n else:\n self.properties['Category'] = task.reward_type\n self.properties['Icon'] = task.icon\n self.properties['Reward'] = task.reward\n else:\n raise TaskAlreadyAssigned(self, task)",
"def update_task(self, name, fields):\n pass",
"def del_task(self, task_name):\n if task_name not in self.task_list:\n raise Exception(\"Task not in list.\")\n del self.task_list[task_name]\n ii = [i for i, t in enumerate(self.task_order) if t == task_name][::-1]\n for i in ii:\n del self.task_order[i]",
"def TaskDisplayName(cls, task):\n if not task: return None\n return '//' + cls.TaskRelativeName(task)",
"def update_task(\n self,\n task_id: str,\n task_name: Optional[str] = None,\n project_id: Optional[str] = None,\n ) -> None:\n if len(self.find_task_runs(task_id=task_id)) != 0:\n raise MephistoDBException(\n \"Cannot edit a task that has already been run, for risk of data corruption.\"\n )\n if task_name in [\"\"]:\n raise MephistoDBException(f'Invalid task name \"{task_name}')\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n if task_name is not None:\n c.execute(\n \"\"\"\n UPDATE tasks\n SET task_name = ?\n WHERE task_id = ?;\n \"\"\",\n (task_name, int(task_id)),\n )\n if project_id is not None:\n c.execute(\n \"\"\"\n UPDATE tasks\n SET project_id = ?\n WHERE task_id = ?;\n \"\"\",\n (int(project_id), int(task_id)),\n )\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException(e)\n elif is_unique_failure(e):\n raise EntryAlreadyExistsException(\n f\"Task name {task_name} is already in use\"\n )\n raise MephistoDBException(e)",
"def set_name(self, application_name):\r\n self._name = application_name",
"def task_label(self) -> str:\n label = str(self.request.id) if self.request.id else self.name\n label += '_%d' % self.request.retries if self.request.retries >= 1 else ''\n return label"
] |
[
"0.8254526",
"0.74813414",
"0.688587",
"0.6770161",
"0.65853554",
"0.65247416",
"0.6488435",
"0.6327614",
"0.62774235",
"0.6201881",
"0.61336696",
"0.6100681",
"0.5963401",
"0.59389114",
"0.5913918",
"0.5872126",
"0.5836587",
"0.56788886",
"0.56594557",
"0.56442976",
"0.55894923",
"0.5568109",
"0.55527943",
"0.55439353",
"0.5499737",
"0.5490095",
"0.54504204",
"0.543582",
"0.5434947",
"0.5422086"
] |
0.79433864
|
1
|
Gets the stage_name of this LoanApplicationTasks.
|
def stage_name(self) -> str:
return self._stage_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_name(self) -> str:\n return self._values.get(\"stage_name\")",
"def getTaskName(self):\n return self._taskName",
"def task_name(self) -> str:\n return self._task_name",
"def task_name(self):\n pass",
"def state_name(self):\n return TASK_STATE.get(self.state, 'UNKNOWN')",
"def stage_status(self) -> str:\n return pulumi.get(self, \"stage_status\")",
"def stage(self):\n return self._stage",
"def stage(self):\n return self._stage",
"def stage(self):\n return self._stage",
"def state_name(self):\n return task_states.to_str(self.state)",
"def step_name(self):\n return self._step_name",
"def env_name(self):\n return f\"{self.project_name}-{self.stage}\"",
"def stage_time(self) -> str:\n return pulumi.get(self, \"stage_time\")",
"def getName(self):\n return self.stepDictionary[self.getLocation()]",
"def task(self) -> str:\n return self._task",
"def name(self):\n return self.application_tree['name']",
"def job_name(self) -> str:\n return self._step_execution_context.job_name",
"def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)",
"def step_container_name(self) -> str:\n return (\n self.step_field(\"task.type\") +\n \"_\" +\n self.step_name().replace(\".\", \"_\")\n )",
"def task_label(self) -> str:\n label = str(self.request.id) if self.request.id else self.name\n label += '_%d' % self.request.retries if self.request.retries >= 1 else ''\n return label",
"def name(self):\n return self._job",
"def get_target_simple(self):\n task = self.task.get_task(self.task_id)\n return str(task['name'])",
"def name(self):\n try:\n return self._name\n except AttributeError:\n if self.is_task:\n try:\n return self.pos_str\n except:\n return os.path.basename(self.workdir)\n else:\n return os.path.basename(self.workdir)",
"def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')",
"def step_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"step_name\")",
"def generate_workflow_name(self) -> str:\n return self._workflow_name",
"def get_process_name(self):\n\n return self._args.t",
"def workflow_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workflow_name\")"
] |
[
"0.785884",
"0.785884",
"0.78139293",
"0.718115",
"0.70540637",
"0.6642022",
"0.64218915",
"0.63033044",
"0.62711096",
"0.62711096",
"0.62711096",
"0.62335616",
"0.62320334",
"0.6219078",
"0.61579126",
"0.61254984",
"0.6106092",
"0.6102382",
"0.6053165",
"0.6023286",
"0.59876245",
"0.5971004",
"0.59544677",
"0.5953912",
"0.5910319",
"0.58930576",
"0.5870665",
"0.5868596",
"0.5868347",
"0.5867998"
] |
0.7905333
|
0
|
Sets the stage_name of this LoanApplicationTasks.
|
def stage_name(self, stage_name: str):
if stage_name is None:
raise ValueError("Invalid value for `stage_name`, must not be `None`") # noqa: E501
self._stage_name = stage_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def stage_name(self) -> str:\n return self._stage_name",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def task_name(self, task_name):\n\n self._task_name = task_name",
"def stage_name(self) -> str:\n return self._values.get(\"stage_name\")",
"def stage(self, stage):\n if stage is None:\n raise ValueError(\"Invalid value for `stage`, must not be `None`\")\n allowed_values = [\"starting\", \"completed\", \"in_progress\"]\n if stage not in allowed_values:\n raise ValueError(\n \"Invalid value for `stage` ({0}), must be one of {1}\"\n .format(stage, allowed_values)\n )\n\n self._stage = stage",
"def _setTaskName(self, taskName):\n self._taskName = taskName\n self._progressChangedNotifier.notify(self)",
"def add_stage(self, stage_name: str) -> \"CdkStage\":\n return jsii.invoke(self, \"addStage\", [stage_name])",
"def set_progress_stage(self, stage_name, progress=None):\n if progress is not None:\n progress = int(progress)\n stage_name += ' %3d%%' % progress\n self.replace_metainfo_value(self.PROGRESS_INFO, StringValue(stage_name))",
"def task_name(self, task_name: str):\n if task_name is None:\n raise ValueError(\"Invalid value for `task_name`, must not be `None`\") # noqa: E501\n\n self._task_name = task_name",
"def task_name(self) -> str:\n return self._task_name",
"def task_name(self):\n pass",
"def stage(self, stage):\n self._stage = stage\n self._layer = Sdf.Layer.CreateAnonymous()\n self._stage.GetSessionLayer().subLayerPaths.append(self._layer.identifier)",
"def set_thread_name(self, thread_name: str):\n self.thread_name = thread_name",
"def team_name(self, team_name):\n\n self._team_name = team_name",
"def getTaskName(self):\n return self._taskName",
"def set_batch_archive_name(self, archive_name):\n self._basket.set_batch_archive_name(archive_name)",
"def env_name(self):\n return f\"{self.project_name}-{self.stage}\"",
"def stage_set_send_note(self, cr, uid, ids, stage_id, context=None):\n stage_name = self.pool.get('crm.case.stage').name_get(cr, uid, [stage_id], context=context)[0][1]\n return self.message_post(cr, uid, ids, body= _(\"Stage changed to <b>%s</b>.\") % (stage_name), context=context)",
"def set_name(self, application_name):\r\n self._name = application_name",
"def state_name(self):\n return TASK_STATE.get(self.state, 'UNKNOWN')",
"def set_stage(stage):\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"w\")\n f.write(\"%s\\n\" % stage)\n f.close()\n logger.debug(\"set stage: %s\" % (stage))\n except:\n raise AssertionError(\"Unable to save setup/teardown stage! %s\" % (sys.exc_info()[1]))\n return stage",
"def setProgramName(self, *args):\n return _libsbml.SBMLWriter_setProgramName(self, *args)",
"def puttaskname(self,taskname_): # 3\n res = self.__obj.puttaskname(taskname_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def task(self, **task):\n task[\"name\"] = task[\"name\"].replace(\"=\", \"--\")\n return task",
"def name(self, name):\n self._name = name\n self.working_directory = abspath(join(self.dataset_dir, 'analyses/' + self.name))",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()",
"def setup(self, stage: Optional[str] = None) -> None:",
"def topology_name(self, topology_name):\n\n self._topology_name = topology_name"
] |
[
"0.65028423",
"0.63868105",
"0.63868105",
"0.63487524",
"0.628022",
"0.600978",
"0.57595444",
"0.5660529",
"0.550158",
"0.55004424",
"0.54686415",
"0.53505766",
"0.52731216",
"0.52383596",
"0.5148092",
"0.51372725",
"0.5131897",
"0.50910527",
"0.5033554",
"0.5031995",
"0.5024196",
"0.5023451",
"0.5019682",
"0.500132",
"0.49981803",
"0.49678448",
"0.49639288",
"0.4959651",
"0.49463254",
"0.49407095"
] |
0.69675004
|
0
|
Gets the task_status of this LoanApplicationTasks.
|
def task_status(self) -> str:
return self._task_status
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def get_task_status(task_id: TaskId):",
"def celery_task_status(self):\n return self._get_celery_queue_data()",
"def taskbystatus(self, **kwargs):\n rows = self.api.query(None, None, self.Task.TaskByStatus_sql, username_=kwargs[\"username\"], taskstatus=kwargs[\"taskstatus\"])\n\n return rows",
"def get_task_status(task_id):\r\n mock_request = Mock()\r\n mock_request.REQUEST = {'task_id': task_id}\r\n response = instructor_task_status(mock_request)\r\n status = json.loads(response.content)\r\n return status",
"def fuota_task_status(self) -> Optional[str]:\n return pulumi.get(self, \"fuota_task_status\")",
"def _get_instructor_task_status(task_id):\r\n instructor_task = get_updated_instructor_task(task_id)\r\n status = get_status_from_instructor_task(instructor_task)\r\n if instructor_task is not None and instructor_task.task_state in STATES_WITH_STATUS:\r\n succeeded, message = get_task_completion_info(instructor_task)\r\n status['message'] = message\r\n status['succeeded'] = succeeded\r\n return status",
"def get_task_type(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskType', self.handle)",
"def get_status(self):\n return self._status",
"def get_task_status(id):\n # obtain the task and validate it\n global background_tasks\n rv = background_tasks.get(id)\n if rv is None:\n return not_found(None)\n\n # if the task object is a Thread object that means that the task is still\n # running. In this case return the 202 status message again.\n if isinstance(rv, Thread):\n return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}\n\n # If the task object is not a Thread then it is assumed to be the response\n # of the finished task, so that is the response that is returned.\n # If the application is configured to auto-delete task status resources once\n # the task is done then the deletion happens now, if not the client is\n # expected to send a delete request.\n if app.config['AUTO_DELETE_BG_TASKS']:\n del background_tasks[id]\n return rv",
"def retrieve_task(self, task_id):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_TASK_STATUS,\n str(task_id)]))\n return r.json()",
"def workflow_status(self):\n return self._workflow_status",
"def get_status(self):\n return self.status",
"def get_status(self):\n return self.status",
"def get_status(self):\n return self.status",
"def getstatus(self):\n return self.__status",
"def tasks(self) -> List[TaskStatusDefinition]:\n return self._tasks",
"def get_status(self, ids):\n return [self.tasks[id].status for id in ids]",
"def task_status():\n pass",
"def get_status(self):\n # TODO retrieve from db if not set\n return self.status",
"def get_running_task_count(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetRunningTaskCount', self.handle)",
"def get(self):\n\n return task_service.get_tasks()",
"def status(self):\n return self._data['status']",
"def status(self):\n\n return self._status",
"def status(self):\n\n return self._status",
"def status(self):\n\n return self._status",
"def mesos_status(self, submissionId):\n get_tasks = self.driver.getTasks()['get_tasks']\n task_state = None\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks')\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n task_state = task['state']\n self._log.debug(\"Task state = \" + task_state)\n else:\n self._log.debug(\"Task not found\")\n\n return task_state",
"def status(self):\n return self.get(self._names[\"status\"])",
"def getStatus(self):\n return self._status",
"def get_import_status(self):\n return AsyncResult(self.import_task_id).state",
"def get_task_status(self, **kwargs):\n if kwargs is None or kwargs['parameters'] is None:\n message = \"For 'get_task_status' method parameters are not parsed.\"\n logger.critical(message)\n raise ValueError(message)\n\n if \"message_id\" not in kwargs['parameters']:\n message = \"Key 'message_id' not in kwargs.\"\n logger.critical(message)\n raise ValueError(message)\n\n message_id = kwargs['parameters']['message_id']\n\n return_data = {\"state\": \"Error\"}\n auth = self.authenticate()\n if auth == 200:\n task_completed = False\n state_message = \"Queued\"\n while not task_completed:\n sleep(WAIT_TIME_BETWEEN_REQUESTS)\n response = Utils.make_get_request(self.url(\"TaskInfo\" + \"/\" + str(message_id)),\n headers=self.request_header, verify=False)\n if 'StateMessage' in response.json():\n state_message = response.json()['StateMessage']\n if state_message == \"Success\" or state_message == \"Error\":\n task_completed = True\n return_data[\"state\"] = state_message\n if state_message == \"Success\":\n return_data[\"vm_id\"] = response.json()['Result']\n else:\n message = \"unable to authenticate to the PlatformA server,\" \\\n \" got the below response from server {}\".format(auth)\n logging.debug(message)\n raise Exception(message)\n\n return return_data"
] |
[
"0.71324706",
"0.7093253",
"0.67850393",
"0.65579414",
"0.6541642",
"0.6334591",
"0.6273188",
"0.6264664",
"0.62501335",
"0.6231414",
"0.61964184",
"0.61462724",
"0.61462724",
"0.61462724",
"0.61388564",
"0.6122741",
"0.61159295",
"0.61024487",
"0.6097303",
"0.6073409",
"0.60279",
"0.602229",
"0.59898156",
"0.59898156",
"0.59898156",
"0.5989738",
"0.5978851",
"0.5975789",
"0.5961026",
"0.594536"
] |
0.7633925
|
0
|
Sets the task_status of this LoanApplicationTasks.
|
def task_status(self, task_status: str):
if task_status is None:
raise ValueError("Invalid value for `task_status`, must not be `None`") # noqa: E501
self._task_status = task_status
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_task_in_progress(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 1)\n\n # Refresh the table\n self.write_tasks_table()",
"def set_status_(self, task: Task):\n tic = time.time()\n self._database[task.name] = tic\n self.save()\n return self",
"def workflow_status(self, workflow_status):\n self._workflow_status = workflow_status",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def set_activity(self, status):\n self._activity = status",
"def setstatus(self, status):\n with self.lock:\n self.status = status",
"def status(self, status: str):\n allowed_values = [\"done\", \"late\", \"in progress\", \"to do\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(\n status, allowed_values\n )\n )\n\n self._status = status",
"def status(self, status: int):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status",
"def task_status(self) -> str:\n return self._task_status",
"def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()",
"def status(self, status):\n allowed_values = [\"Pending\", \"Running\", \"Success\", \"Failed\", \"Skipped\", \"SuccessWithWarning\", \"Canceled\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status"
] |
[
"0.6546041",
"0.63993967",
"0.6269863",
"0.6198993",
"0.6198993",
"0.6198993",
"0.6175255",
"0.61558944",
"0.60749936",
"0.6051563",
"0.5988801",
"0.5986775",
"0.5885703",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675",
"0.5883675"
] |
0.76288915
|
0
|
Gets the loan_application_id of this LoanApplicationTasks.
|
def loan_application_id(self) -> str:
return self._loan_application_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def app_id(self):\n return self._app_id",
"def app_id(self) -> str:\n return self._app_id",
"def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")",
"def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def task_id(self):\n return self._mpis.task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")",
"def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")",
"def get_id(self, app_name):\n _id = []\n apps = [app for app in self.applications.response if app.name == app_name]\n if len(apps) > 0:\n return apps[0].id",
"def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")",
"def appid(self):\n return self._item[\"appid\"]",
"def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")",
"def application_object_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_object_id\")",
"def loan_application_id(self, loan_application_id: str):\n if loan_application_id is None:\n raise ValueError(\"Invalid value for `loan_application_id`, must not be `None`\") # noqa: E501\n\n self._loan_application_id = loan_application_id",
"def peer_azure_app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"peer_azure_app_id\")",
"def application_object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_object_id\")",
"def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')",
"def peer_azure_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"peer_azure_app_id\")",
"def peer_azure_app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"peer_azure_app_id\")",
"def developer_app_insights_application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"developer_app_insights_application_id\")",
"def ApplicationId(self) -> _n_0_t_0:",
"def secondary_app_id(self) -> Optional[str]:\n return pulumi.get(self, \"secondary_app_id\")"
] |
[
"0.64398617",
"0.63063055",
"0.6300978",
"0.6264804",
"0.6140923",
"0.6061766",
"0.60486615",
"0.60486615",
"0.60486615",
"0.60020316",
"0.59677273",
"0.59677273",
"0.59677273",
"0.59677273",
"0.5940339",
"0.59222615",
"0.5908691",
"0.58757806",
"0.58466387",
"0.58241063",
"0.5787616",
"0.5738017",
"0.5719483",
"0.57035357",
"0.5693852",
"0.56572866",
"0.5634824",
"0.555434",
"0.5525218",
"0.5439627"
] |
0.7899649
|
0
|
Sets the loan_application_id of this LoanApplicationTasks.
|
def loan_application_id(self, loan_application_id: str):
if loan_application_id is None:
raise ValueError("Invalid value for `loan_application_id`, must not be `None`") # noqa: E501
self._loan_application_id = loan_application_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def loan_application_id(self) -> str:\n return self._loan_application_id",
"def application_id(self, application_id):\n\n self._application_id = application_id",
"def loan_id(self, loan_id):\n\n self._loan_id = loan_id",
"def task_id(self, task_id):\n self._task_id = task_id",
"def task_id(self, task_id):\n\n self._task_id = task_id",
"def app_id(self, app_id):\n self._app_id = app_id",
"def app_id(self, app_id):\n\n self._app_id = app_id",
"def with_application_id(self, application_id):\n if not isinstance(application_id, str):\n raise TypeError('Application Id must be a string')\n\n self.application_id = application_id\n\n return self",
"def project_task_id(self, project_task_id):\n\n self._project_task_id = project_task_id",
"def setAppID(self, appid):\n\t\tself.config.APP_ID = appid",
"def application_ids(self, application_ids):\n if self.local_vars_configuration.client_side_validation and application_ids is None: # noqa: E501\n raise ValueError(\"Invalid value for `application_ids`, must not be `None`\") # noqa: E501\n\n self._application_ids = application_ids",
"def set_application(self, app_id):\n if self._use_channel_info:\n self._channel = \"\"\n self._channel_name = app_id\n self._is_forced_val = True\n self._forced_count = 0",
"def program_ids(self, program_ids):\n\n self._program_ids = program_ids",
"def task_id(self, task_id):\n if self.local_vars_configuration.client_side_validation and task_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `task_id`, must not be `None`\") # noqa: E501\n\n self._task_id = task_id",
"def organizational_id_number(self, organizational_id_number: str):\n self._organizational_id_number = organizational_id_number",
"def set_task1(self, task_id):\n self._set_task(self.controller.CURRENT, task_id)",
"async def slashtagset_appid(self, ctx: commands.Context, id: int = None):\n app_id = id or self.bot.user.id\n await self.config.application_id.set(app_id)\n self.application_id = app_id\n await ctx.send(f\"Application ID set to `{id}`.\")",
"def set_application(self, app):\n \n self.app = app",
"def workflow_task_count(self, workflow_task_count):\n\n self._workflow_task_count = workflow_task_count",
"def new_task(self, appid):\r\n tasks = []\r\n for i in range(0, 10):\r\n tasks.append(Task(app_id=appid, state='0', info={}))\r\n db.session.add_all(tasks)\r\n db.session.commit()",
"def app_id(self):\n return self._app_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def app_version_id(self, app_version_id):\n\n self._app_version_id = app_version_id",
"def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()",
"def _set_task(self, task_idx):\n self.task_idx = task_idx",
"def task_id(self):\n return self._mpis.task_id",
"def app_id(self) -> str:\n return self._app_id"
] |
[
"0.63690144",
"0.59828776",
"0.57021713",
"0.5565539",
"0.5496316",
"0.54807234",
"0.5467243",
"0.54585177",
"0.5425827",
"0.51348335",
"0.5049111",
"0.49390966",
"0.48765567",
"0.48614517",
"0.48548588",
"0.48031816",
"0.47550064",
"0.4697146",
"0.46013895",
"0.45823494",
"0.4561365",
"0.45572948",
"0.45572948",
"0.45572948",
"0.45572948",
"0.4489687",
"0.44827533",
"0.44732875",
"0.4472312",
"0.44525033"
] |
0.6659482
|
0
|
Serialize a search result.
|
def serialize_search(
self, pid_fetcher, search_result, links=None, item_links_factory=None
):
records = []
for hit in search_result["hits"]["hits"]:
processed_hit = self.transform_search_hit(
pid_fetcher(hit["_id"], hit["_source"]),
hit,
links_factory=item_links_factory,
)
records.append(self.process_dict(processed_hit))
return self._format_csv(records)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def serialize_search(self, pid_fetcher, search_result, links=None,\n item_links_factory=None, **kwargs):\n hits_dict = {\n 'hits': {\n 'hits': [\n self.transform_search_hit(\n pid_fetcher(hit['_id'], hit['_source']),\n hit,\n links_factory=item_links_factory,\n **kwargs\n )\n for hit in search_result['hits']['hits']\n ],\n 'total': search_result['hits']['total']\n },\n 'links': links or {},\n # This is the only new/different thing from parent\n 'aggregations': self.transform_aggregation(\n search_result.get('aggregations', {})\n )\n }\n\n return json.dumps(hits_dict, **self._format_args())",
"def serializeSearchResult( result ):\n print( result.__dict__ )\n return { k: getattr( result, k ) for k in result.__dict__ }",
"def __str__(self):\n return pprint.pformat(self.search_results)",
"def serialize(self) -> dict:\n return {\n \"parameters\": self.parameters,\n \"results\": self.results,\n }",
"def serialize(self, root):\r\n self.res = []\r\n self.search_serialize(root)\r\n return ' '.join(self.res)",
"def save_results_internal(self, obj: object):\r\n filename = f\"{self.search_internal_path}/results_internal.dill\"\r\n\r\n with open_(filename, \"wb\") as f:\r\n dill.dump(obj, f)",
"def __repr__(self):\n\n\t\treturn\"<Result id={} venueid={} competitor_id={} position={}\".format(\n\t\t\tself.result_id, self.venue_id, self.competitor_id, self.position)",
"def results(self):\n\n return self._search_resut",
"def get_json_accessibility_result(self):\n axe_result = json.dumps(self.results, indent = 3)\n logger.info(axe_result)\n return axe_result",
"def serialize(self) -> dict:\n return {\n \"found\": self.found,\n \"items\": list(map(lambda item: item.serialize(), self.items)),\n }",
"def serialize_result(result: Any) -> Union[str, bytes]:\n if isinstance(result, Node):\n return result.serialize(how='default' if RESULT_FILE_EXTENSION != '.xml' else 'xml')\n else:\n return repr(result)",
"def search_responsify(serializer, mimetype):\n def view(pid_fetcher, search_result, code=200, headers=None, links=None,\n item_links_factory=None):\n response = current_app.response_class(\n serializer.serialize_search(pid_fetcher, search_result,\n links=links,\n item_links_factory=item_links_factory),\n mimetype=mimetype)\n response.status_code = code\n if headers is not None:\n response.headers.extend(headers)\n\n if links is not None:\n add_link_header(response, links)\n\n return response\n\n return view",
"def save_results(results):\n json.dump(results, open(\"results.json\", \"w\"))",
"def serialize(self, value):\n # (Any) -> json\n # this is called when writing to elasticsearch",
"def search(self):\r\n return v3.Search(self)",
"def __str__(self):\n return self.result",
"def search(self, query):\n request_url = self.base_url + 'search'\n data = {'q': query}\n response = requests.get(request_url, data=data, headers=self.headers).json()\n return response",
"def serialize(self):\n pass",
"def elastic_search_json(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'code': self.code,\n 'description': self.description,\n 'type': self.type,\n 'active': \"true\" if self.active else \"false\",\n }",
"def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"",
"def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }",
"def printSearchResults(results):\n Log.Debug('Search produced %d results:' % len(results))\n index = 0\n for result in results:\n Log.Debug(' ... %d: id=\"%s\", name=\"%s\", year=\"%s\", score=\"%d\".' %\n (index, result.id, result.name, str(result.year), result.score))\n index += 1",
"def save_results_internal_json(self, results_internal_dict: Dict):\r\n filename = f\"{self.search_internal_path}/results_internal.json\"\r\n\r\n with open_(filename, \"w+\") as f:\r\n json.dump(results_internal_dict, f, indent=4)",
"def deserialize(self, data):\r\n self.res = data.split()\r\n return self.search_deserialize()",
"def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'ranking': self.ranking,\n 'created_date': self.created_date,\n }",
"def test_parse_search_result(self):\n datafile = pathlib.Path(__file__).parent.joinpath(\"../data/ol_search.json\")\n search_data = json.loads(datafile.read_bytes())\n result = list(self.connector.parse_search_data(search_data, 0))[0]\n\n self.assertIsInstance(result, SearchResult)\n self.assertEqual(result.title, \"This Is How You Lose the Time War\")\n self.assertEqual(result.key, \"https://openlibrary.org/works/OL20639540W\")\n self.assertEqual(result.author, \"Amal El-Mohtar, Max Gladstone\")\n self.assertEqual(result.year, 2019)\n self.assertEqual(result.connector, self.connector)",
"def search(self, query):",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def serialize(self):\n return{\n # 'date': self.date,\n 'q1': self.q1,\n 'q2': self.q2,\n 'q3': self.q3,\n 'q4': self.q4,\n 'finalscore': self.finalscore,\n 'id': self.id,\n }",
"def serialize(self):\n return {\n 'name': self.name,\n 'location': self.location,\n 'telephone': self.telephone,\n 'founding_year': self.founding_year,\n }"
] |
[
"0.72805274",
"0.71256214",
"0.6633089",
"0.63286436",
"0.6074966",
"0.5878755",
"0.5834139",
"0.5825184",
"0.5819029",
"0.5762264",
"0.57363033",
"0.57293785",
"0.5727291",
"0.57013524",
"0.5679446",
"0.5613311",
"0.5610144",
"0.55878663",
"0.5585593",
"0.55538034",
"0.55349004",
"0.5531812",
"0.55203617",
"0.5513217",
"0.55119234",
"0.5476604",
"0.54740036",
"0.5464309",
"0.5460942",
"0.5452911"
] |
0.7188329
|
1
|
Checks if the given key is contained within any of the fields.
|
def key_in_field(self, key, fields):
for field in fields:
if key in field:
return True
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None",
"def __contains__(self, key):\n\n return key in self.keys_set",
"def contains(self, key):\n if key in self.key_list:\n return True\n return False",
"def __contains__(self, key):\n return key in self.keys",
"def contains(self, key):\n\n return key in self.keys()",
"def __contains__(self, key):\n keys = list(self._indexer(key))\n if len(keys) == 1:\n return keys[0] in self._data\n return [k in self._data for k in keys]",
"def __contains__(self, key):\n return key in self._opts or key in self._groups",
"def __contains__(self, key, *args, **kwargs):\n if key in self._list(*args, **kwargs):\n return True\n return False",
"def _in_keys(self, key, keys):\n # sorting required for comparison\n key.sort()\n return key in keys",
"def contains(self, key: int) -> bool:\n y = key % 80\n return key in self.arr[y]",
"def has_key(self, key):\n return key in self",
"def check_fields_in_dict(dictionary, fields, dictionary_name):\n for field in fields:\n if field not in dictionary:\n raise KafkaIotException(\"%s field(s) required but not found in %s: %s\"\n % (\", \".join(fields), dictionary_name, str(dictionary)))\n return True",
"def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0",
"def __contains__(self, key: str) -> bool:\n return key in self.raw",
"def contains(self, key):\n if key in self.nums:\n return True\n return False",
"def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False",
"def __contains__(self, key):\n return self._lookup(key).value is not None",
"def __contains__(self, key):\n return key in self._index",
"def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return True\n return False",
"async def contains(self, key: str) -> bool:",
"def subfields_any(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') == req_val:\n return True\n return False",
"def __contains__(self, key: K) -> bool:\n return key in self._table",
"def has(self, key):\r\n # handle any special cases\r\n if key.scope == Scope.content:\r\n self._load_definition()\r\n elif key.scope == Scope.parent:\r\n return True\r\n\r\n # it's not clear whether inherited values should return True. Right now they don't\r\n # if someone changes it so that they do, then change any tests of field.name in xx._field_data\r\n return key.field_name in self._fields",
"def __contains__(self, key):\n return key in self._group._opts",
"def containsKey(self, key):\n return get(key) != None",
"def __contains__(self, key):\n\n if type(key) != self.type:\n return False\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n return False\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n return False\n\n return True\n else:\n return others in self.children[first_char]",
"def has_key(self, key):\n if '.' in key:\n first, second = key.split('.', 1)\n return self[first].has_key(second)\n else:\n return key in self.keys()",
"def __contains__(self, key):\n return key in self._get_storage()",
"def contains(self, key: int) -> bool:\n lv1, lv2 = self.hashing(key)\n \n for item in self.cont[lv1][lv2]:\n if item==key:\n return True\n \n return False",
"def contains(self, key):\n return key in self.hashset[key % self.N]"
] |
[
"0.7070488",
"0.6968872",
"0.6961819",
"0.6932418",
"0.6916172",
"0.6817634",
"0.67951804",
"0.678997",
"0.6779572",
"0.6749631",
"0.6730777",
"0.6663097",
"0.6663021",
"0.6642367",
"0.6633289",
"0.6630922",
"0.6619883",
"0.6599063",
"0.65861446",
"0.6579086",
"0.65784895",
"0.6576983",
"0.65735716",
"0.6562349",
"0.65590394",
"0.6528164",
"0.6510153",
"0.6507415",
"0.6502535",
"0.6494223"
] |
0.84226483
|
0
|
This function will be called on a form post-save/create. It adds a logging message.
|
def callback_success_message(request):
msg = 'Sucessfully recorded form :)'
logger.info(msg)
messages.info(request._request, msg)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def on_post(self):\n return \"Ok, the stuff is being saved\"",
"def callback_fail_message(request):\n msg = 'Form storing has failed :('\n logger.error(msg)\n messages.error(request._request, msg)",
"def log(self, message):",
"def generate_log(window_info, form: SerializedForm(LogNameForm)):\n if form.is_valid():\n logname = (\n form.cleaned_data[\"log_name\"]\n or form.cleaned_data[\"other_log_name\"]\n or \"django.request\"\n )\n level = form.cleaned_data[\"level\"]\n message = form.cleaned_data[\"message\"]\n logger_ = logging.getLogger(logname)\n logger_.log(int(level), message)\n message = _(\n 'message \"%(message)s\" logged to \"%(logname)s\" at level %(level)s.'\n ) % {\"message\": message, \"level\": level, \"logname\": logname}\n notify(window_info, message, to=WINDOW, level=WARNING, style=BANNER)",
"def log_message(self, formate, *args):\n return",
"def _log(self, message):\n pass",
"def log_create(sender, instance, created, **kwargs):\n if created:\n stracks.user(instance).log(\"? has been created\")",
"def log_message(self, msg):\n\t\tself.logView.log_message(msg)",
"def log(self, message: str):",
"def log_success(self, obj, message=None):\n super().log_success(obj=obj, message=message)",
"def msg_handler(self, msg):\n self.view.frame.log.append(msg)",
"def InsertLog():",
"def _logging_handler(self, record):\n if self.enable:\n message = self.log.handlers[0].format(record)\n self._log_lines.append(str(message))\n self.widget.object = \"<br/>\".join(self._log_lines[::-1])",
"def __init__(self, *args, **kwargs):\n super(AppswellSimpleModelForm, self).__init__(*args, **kwargs)\n\n # override labels\n self.fields['message'].label = 'log message'",
"def log_message(self, fmt, *args):\r\n pass\r\n # log_message\r",
"def log_post(msg):\n casalog.post(msg, 'INFO', 'bsvoboda')",
"def log_addition(request, instance, message):\n from django.contrib.admin.models import LogEntry, ADDITION\n LogEntry.objects.log_action(\n user_id=request.user.pk,\n content_type_id=get_content_type_for_model(instance).pk,\n object_id=instance.pk,\n object_repr=force_text(instance),\n action_flag=ADDITION,\n change_message=message,\n )",
"def log_info(self, obj, message):\n super().log_info(obj=obj, message=message)",
"def log(self, level, msg, *args, **kwargs):\n pass",
"def log(self, _strMessage=\"\"):\n self.edLogging.log(_strMessage)",
"def logs_add_message(self, level, message):\n pass",
"def on_UploadLog_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def post_log(\n self, message, level=\"INFO\", item_id=None, file_to_attach=None\n ):\n self._log(\n message,\n level,\n file_to_attach=file_to_attach,\n item_id=item_id or self._log_item_id,\n )",
"def log_message(self, format, *args):",
"def update_log(self, message):\n self.LogOutput_Field.appendPlainText(message)",
"def save_model(self, request, obj, form, change):\n\n if form.is_valid():\n cd = form.cleaned_data\n obj.filename = cd['filename']\n obj.society_code = cd['society_code']\n obj.society_name = cd['society_name']\n obj.date = cd['date']\n # TODO move process() to model, and handle messages here\n obj.report = self.process(\n request, obj.society_code, cd['acknowledgement_file'])\n obj.cwr = cd['acknowledgement_file']\n super().save_model(request, obj, form, change)",
"def on_up(self):\r\n self.log()",
"def post_comment(self, char, event):\r\n msg = self.cleaned_data[\"journal_text\"]\r\n white = not self.cleaned_data[\"private\"]\r\n char.messages.add_event_journal(event, msg, white=white)",
"def save(self, *args, **kwargs):\n data = self.cleaned_data #Gets the data from the form, stores it as a dict\n allUsers = Bruker.get_all_dict(Bruker)\n mottaker = allUsers[int(data['mottaker'])]\n melding = Messages(content=data['content'], author=self.getUser(), receiver=mottaker)\n melding.save()",
"def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)"
] |
[
"0.70131916",
"0.673436",
"0.6676555",
"0.6661796",
"0.6638116",
"0.6433321",
"0.64053524",
"0.63213503",
"0.6281074",
"0.627926",
"0.6102167",
"0.6007364",
"0.60037506",
"0.59628826",
"0.595433",
"0.5934547",
"0.59297633",
"0.5929501",
"0.5916433",
"0.59147054",
"0.58945763",
"0.5890661",
"0.5849816",
"0.58048856",
"0.5794713",
"0.57878387",
"0.57724255",
"0.5767899",
"0.57317394",
"0.5721849"
] |
0.69311756
|
1
|
This function will be called on a form post-save/create. It adds a logging message (error).
|
def callback_fail_message(request):
msg = 'Form storing has failed :('
logger.error(msg)
messages.error(request._request, msg)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def callback_success_message(request):\n msg = 'Sucessfully recorded form :)'\n logger.info(msg)\n messages.info(request._request, msg)",
"def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)",
"def on_post(self):\n return \"Ok, the stuff is being saved\"",
"def handle_error(self, request, error):\n self.log.error(\"An error occurred at request \" + repr(request) + \": \" + repr(error))",
"def add_error(self, request, message):\n\n ParameterErrorMessage(request, self, message)",
"def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r",
"def error(self):\n ...",
"def generate_log(window_info, form: SerializedForm(LogNameForm)):\n if form.is_valid():\n logname = (\n form.cleaned_data[\"log_name\"]\n or form.cleaned_data[\"other_log_name\"]\n or \"django.request\"\n )\n level = form.cleaned_data[\"level\"]\n message = form.cleaned_data[\"message\"]\n logger_ = logging.getLogger(logname)\n logger_.log(int(level), message)\n message = _(\n 'message \"%(message)s\" logged to \"%(logname)s\" at level %(level)s.'\n ) % {\"message\": message, \"level\": level, \"logname\": logname}\n notify(window_info, message, to=WINDOW, level=WARNING, style=BANNER)",
"def error(self, msg):\r\n self.logger.error(msg)",
"def error(self, _strMessage=\"\"):\n self.edLogging.error(_strMessage)",
"def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )",
"def log_message(self, formate, *args):\n return",
"def error(self, *args):\n self.mylog.error(*args)",
"def error(self, msg, *args, **kwargs):\n pass",
"def log(self, message):",
"def error(self, *args, **kwargs):",
"def _log(self, message):\n pass",
"def error(self, update, context):\n self.logger.error('Update \"%s\" caused error \"%s\"' % (update, context.error))",
"def form_invalid(self, form, request):\n return",
"def form_invalid(self, form, request):\n return",
"def log_failure(self, obj, message):\n super().log_failure(obj=obj, message=message)",
"def error_handler(self, failure):\n log.error(failure)",
"def error(self, msg):\n\n self.logger.error(msg)",
"def tag_create(request, error='', message=''):\n error_fields=[]\n default_tag = Tag(name='',title='',color='#' + str(random.randint(222222, 999999)))\n\n try:\n added = bool(request.POST['add'])\n except:\n added = False\n try:\n action = request.POST['action']\n except:\n action = 'add'\n\n if added == True:\n try:\n new_name = request.POST['name']\n if new_name == '':\n error += ' Name is blank.'\n else:\n default_tag.name = new_name\n except:\n error += ' No name provided.'\n try:\n new_title = request.POST['title']\n if new_title == '':\n error += ' Title is blank.'\n else:\n default_tag.title = new_title\n except:\n error += ' No title provided.'\n try:\n new_color = request.POST['color']\n default_tag.color = new_color\n if len(new_color) != 7:\n error += ' Invalid colour - hex colours are 7 characters long, including the #.'\n except:\n error += ' No colour provided.'\n\n if error == '':\n try:\n new_tag = Tag(name=new_name, title=new_title, color=new_color)\n new_tag.full_clean()\n try:\n new_tag.save()\n message += 'Your tag was added to the database.'\n default_tag = Tag(name='',title='',color='#' + str(random.randint(222222, 999999)))\n except:\n error += 'Failed to access the database.'\n except ValidationError as ve:\n for k in ve.message_dict.keys():\n error_fields.append(k)\n for m in ve.message_dict[k]:\n error += m + ' '\n\n if action == 'saveandaddanother' or action == 'add' or error != '':\n return render_to_response('feedback/tag_create.html',\n {'error': error,\n 'error_fields': error_fields,\n 'message': message,\n 'added': added,\n 'default_tag': default_tag},\n context_instance=RequestContext(request))\n elif action == 'save':\n return tags(request, error=error, message=message)\n else:\n error += 'Invalid submit action requested.'\n return render_to_response('feedback/tag_create.html',\n {'error': error,\n 'error_fields': error_fields,\n 'added': added,\n 'message': message,\n 'default_tag': default_tag},\n context_instance=RequestContext(request))",
"def log_error(self, error): \n # add the error to the list\n self.error_log.append(error)",
"def log_success(self, obj, message=None):\n super().log_success(obj=obj, message=message)",
"def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)",
"def error(self):\n pass",
"def error(self, context, update, error):\n\t\tself.logger.warning('Update \"%s\" caused error \"%s\"', update, error)",
"def error(self, message):\n print message"
] |
[
"0.6559552",
"0.65144616",
"0.63575673",
"0.6104596",
"0.6062535",
"0.5954148",
"0.5945705",
"0.5941205",
"0.59325004",
"0.5923055",
"0.5919421",
"0.5919199",
"0.58883065",
"0.58834726",
"0.5868146",
"0.5862035",
"0.58343416",
"0.5818541",
"0.58176386",
"0.58176386",
"0.5815134",
"0.5806772",
"0.5792482",
"0.57611614",
"0.5738195",
"0.57200795",
"0.56989723",
"0.568584",
"0.5647961",
"0.56385463"
] |
0.76007026
|
0
|
Returns PIL.Image objects for all the images in directory. If directory is not specified, uses the current directory. Returns a 2-tuple containing a list with a PIL.Image object for each image file in root_directory, and a list with a string filename for each image file in root_directory.
|
def get_images(directory=None): #import from mask.py
    if directory == None:
        directory = os.getcwd() # Use working directory if unspecified
    image_list = [] # Initialize aggregators
    file_list = []
    directory_list = os.listdir(directory) # Get list of files
    for entry in directory_list:
        absolute_filename = os.path.join(directory, entry)
        try:
            image = PIL.Image.open(absolute_filename)
            file_list += [entry]
            image_list += [image]
        except IOError:
            pass # do nothing with errors trying to open non-images
    return image_list, file_list
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list",
"def get_images(directory=None):\r\n \r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n \r\n image_list = [] # Initialize aggregaotrs\r\n file_list = []\r\n \r\n directory_list = os.listdir(directory) # Get list of files\r\n for entry in directory_list:\r\n if len(file_list)<2:\r\n absolute_filename = os.path.join(directory, entry)\r\n try:\r\n image = PIL.Image.open(absolute_filename)\r\n file_list += [entry]\r\n image_list += [image]\r\n except IOError:\r\n pass # do nothing with errors tying to open non-images\r\n return image_list, file_list",
"def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]",
"def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list",
"def get_existing_images(directory):\n validate_directory(directory)\n directory += '/'\n try:\n return listdir(directory)\n except:\n mkdir(directory)\n return []",
"def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))",
"def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles",
"def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, imgpaths",
"def get_images_of_folder(folder):\n\n Settings.dev_print(\"getting images of folder: {}\".format(folder.get_title()))\n if not folder: return []\n imgs = []\n files = []\n valid_images = [\".jpg\",\".gif\",\".png\",\".tga\",\".jpeg\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_images:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"image path: {}\".format(os.path.join(folder.get_path(),f)))\n return files",
"def make_image_list(directory):\r\n\tonly_files = [file for file in listdir(directory) if isfile(join(directory, file))]\r\n\treturn only_files",
"def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)",
"def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files",
"def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images",
"def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images",
"def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name",
"def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs",
"def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list",
"def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list",
"def get_images(path):\n\n # Cast path to absolute path\n absolute = abspath(path)\n\n img_lis = [] # Holds images in a folder\n file_lis = get_files(absolute)\n\n # Now get the images within file list\n img_lis = [f for f in file_lis if is_filetype(f)]\n\n return img_lis",
"def create_image_lists(image_dir):\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n print('in sub loop')\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(image_dir)\n print(\"Looking for images in '\" + image_dir + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n testing_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n testing_images.append(base_name)\n return testing_images",
"def getAllImages(self):\n\n images = list(self._images)\n for s in self._subdirs:\n images += s.getAllImages()\n return images",
"def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images",
"def _get_images(image_path):\n logger.debug(\"Getting images: '%s'\", image_path)\n if not os.path.isdir(image_path):\n logger.debug(\"Folder does not exist\")\n return None\n files = [os.path.join(image_path, f)\n for f in os.listdir(image_path) if f.lower().endswith((\".png\", \".jpg\"))]\n logger.debug(\"Image files: %s\", files)\n return files",
"def get_images(self, file_path: str) -> Iterable[Image]:\n return []",
"def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval",
"def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs",
"def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels",
"def loadImagesFromDirectory(self, directoryPath):\n if isdir(directoryPath):\n self._getImagesFromDirectory(directoryPath)\n else:\n print(directoryPath + \" does not exists\")",
"def list_pictures(directory, ext='JPEG'):\n return [os.path.join(root, f)\n for root, _, files in os.walk(directory) for f in files\n if re.match(r'([\\w]+\\.(?:' + ext + '))', f)]",
"def list_pictures(directory, ext='jpg'):\r\n\r\n return [os.path.join(root, f)\r\n for root, _, files in os.walk(directory) for f in files\r\n if re.match(r'([\\w]+\\.(?:' + ext + '))', f)]"
] |
[
"0.8241939",
"0.813711",
"0.77286386",
"0.75424576",
"0.75172657",
"0.73895353",
"0.7282258",
"0.71291536",
"0.7086534",
"0.70178604",
"0.69990695",
"0.69680256",
"0.6947587",
"0.6891989",
"0.6837586",
"0.6810798",
"0.6762368",
"0.67423135",
"0.6730836",
"0.6722797",
"0.6711726",
"0.6676241",
"0.6665158",
"0.66489637",
"0.66422516",
"0.6640771",
"0.6625901",
"0.6615365",
"0.6614463",
"0.6593125"
] |
0.8213258
|
1
|
JSON editor app for viewing config.
|
def json_editor(self):
json_editor = pn.widgets.JSONEditor.from_param(
self.param.config_dict,
mode="view",
menu=False,
sizing_mode="stretch_width",
)
config_viewer = pn.Card(
json_editor,
title="CONFIG Viewer",
sizing_mode="stretch_width",
collapsed=True,
)
return config_viewer
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def config():\n if app.args.ui_mode == \"jinja\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": None,\n \"show\": False,\n \"text\": None,\n \"url\": None\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"jinja2\"\n },\n \"title\": \"RENDER\",\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"Render\",\n \"url\": \"/render\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": 'text'\n },\n \"title\": \"RESULT\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n elif app.args.ui_mode == \"schema\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"schema\",\n \"url\": \"/schema\"\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"SCHEMA\",\n \"b1\": {\n \"icon\": \"check\",\n \"show\": True,\n \"text\": \"Validate\",\n \"url\": \"/validate\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"VALIDATION SUCCESS/ERRORS\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n return jsonify(ui_config)",
"def config():",
"def config():",
"def list_config():\n console = Console()\n _config = loadConfig()\n json_data = richJSON.from_data({**asdict(_config)})\n console.print(Panel(json_data, title=\"SubmarineCliConfig\"))",
"def config(self):\n pass",
"def config(self):\n pass",
"def showConf(self):\n\n return json.dumps(\n self.config, sort_keys=True, indent=4, separators=(',', ': '))",
"def config(self):\n return {}",
"def config_to_view(self):\n raise NotImplementedError",
"def test_app():\n editor = Editor()\n test_data = os.path.split(__file__)[0]\n test_data = os.path.join(test_data, \"../resources/test_data.json\")\n test_data = os.path.abspath(test_data)\n with open(test_data, 'r') as file_in:\n data = json.load(file_in)\n mapped_data = [general.key_map_config(item) for item in data]\n editor.model._add_from_mappings(mapped_data)",
"def config():\n with open(config_path) as config_file:\n data = json.load(config_file)\n return data",
"def settings_show(ctx):\n path = ctx.obj['load_path']\n if not path:\n _raise_settings_not_found()\n with open(path) as handle:\n click.echo(json.dumps(json.load(handle), indent=2, sort_keys=True))",
"def config(interactive=False):\n cfg = ConfigManager()\n if interactive:\n cfg.setup_config_interactive()\n print(cfg)",
"def view_to_config(self):\n raise NotImplementedError",
"def config(self):\n raise NotImplementedError",
"def manage_config(self, action=None, peel=False, force=False,\n as_json=False, path=None):\n if action is None:\n return\n elif action == \"view\":\n if self.config is None:\n raise ValueError(\"No config loaded\")\n flattened = {}\n for cat, bcd in self.config._asdict().items():\n flattened[cat] = bcd.peel(peel=peel)\n return flattened\n #\n nvid = self.GetUser().GetUserName()\n if not hasattr(self, \"_nv_undo_stack\"):\n from collections import deque\n self._nv_undo_stack = deque()\n #\n import os\n from .configgers import default_config\n defver = default_config.settings[\"config_version\"]\n #\n def get_path(path): # noqa: E306\n ext = \"json\" if as_json else \"ini\"\n if path and not force:\n path = os.path.expandvars(os.path.expanduser(path))\n path = os.path.abspath(path)\n # All dirs must exist; \"export\" creates files if absent\n if os.path.isdir(path):\n path = os.path.join(path, f\"config.{ext}\")\n elif not os.path.exists(path):\n parpath = os.path.dirname(path)\n if not os.path.isdir(parpath):\n path = None\n elif not any(path.endswith(e) for e in (\".json\", \".ini\")):\n path = os.path.join(parpath, f\"config.{ext}\")\n if not path:\n path = os.path.join(self.datadir, f\"config.{ext}\")\n return path\n #\n # save/export\n def ensure_defver(peeled): # noqa: E306\n ps = peeled.setdefault(\"settings\", {})\n ps.setdefault(\"config_version\", defver)\n #\n # load/reload\n def handle_outdated(curver): # noqa: E306\n dirname = os.path.dirname(path or get_path(path))\n basename = \"config.{}.new\".format(\"json\" if as_json else \"ini\")\n dest = os.path.join(dirname, basename)\n orig, self.config = self.config, construct_config({})\n # OnHooks will be skipped during this call\n self.manage_config(\"export\", force=True, as_json=as_json,\n path=dest)\n self.config = orig\n msg = \" \".join(\"\"\"\n Your config appears to be outdated. Please update it\n using the latest defaults, which can be found here: {}.\n Make sure to include the new version number. 
Or, use\n --force to bypass this warning.\n \"\"\".split()).format(dest)\n raise UserWarning(msg)\n #\n if action == \"load\":\n from .configgers import load_config, construct_config\n stringified = self.nv.get(nvid)\n peeled = load_config(stringified) if stringified else {}\n # Could just view/peel, but this should be the only redundant item\n curver = peeled.get(\"settings\", {}).get(\"config_version\")\n if peeled:\n if not curver:\n raise KeyError(\"Required item /settings/config_version \"\n f\"missing from nv[{nvid}]\")\n elif curver == defver:\n del peeled[\"settings\"][\"config_version\"]\n elif not force:\n handle_outdated(curver)\n self.config = construct_config(peeled)\n return\n elif action == \"save\":\n if not self.config.settings[\"host\"] and not force:\n msg = (\"Warning: not caching config because \"\n \"'/settings/host' is empty; use --force to override\")\n raise UserWarning(msg)\n peeled = self.manage_config(action=\"view\", peel=True)\n # Must track version because module may be updated in the interim\n ensure_defver(peeled)\n from .ootil import restring\n if nvid in self.nv:\n MAX_UNDOS = 5\n if len(self._nv_undo_stack) == MAX_UNDOS:\n del self.nv[self._nv_undo_stack.pop()]\n from datetime import datetime\n bakkey = f\"{nvid}.{datetime.now().timestamp()}\"\n self.nv[bakkey] = self.nv[nvid]\n self._nv_undo_stack.appendleft(bakkey)\n self.nv[nvid] = restring(peeled)\n return\n elif action == \"undo\":\n # TODO write tests for this, add to cmd_update\n raise RuntimeError(\"TODO: need tests for this\")\n try:\n lastkey = self._nv_undo_stack.popleft()\n except IndexError:\n raise UserWarning(\"Nothing to undo\")\n self.nv[nvid] = self.nv[lastkey] # TODO see if nv supports pop\n del self.nv[lastkey]\n return self.manage_config(\"load\")\n elif action not in (\"reload\", \"export\"):\n raise ValueError(\"Unrecognized action\")\n #\n path = get_path(path)\n #\n def validate(peeled, skip_dropped=False): # noqa: E306\n from .configgers import validate_config\n warn, info = validate_config(peeled)\n msg = []\n if skip_dropped:\n info = [l for l in info if \"dropped\" not in l]\n if info:\n msg += [f\"\\x02FYI:\\x02\\n\"] + info\n if warn:\n msg += [f\"\\x02Potential problems:\\x02\\n\"] + warn\n if msg:\n self.put_pretty(\"\\n\".join(msg))\n return False if warn else True\n #\n if action == \"reload\":\n if not os.path.exists(path):\n raise FileNotFoundError(f\"No config found at {path}\")\n from .configgers import load_config, construct_config\n loaded = load_config(path)\n if not force:\n curver = loaded.get(\"settings\", {}).get(\"config_version\")\n if curver:\n if curver == defver:\n del loaded[\"settings\"][\"config_version\"]\n elif not force:\n handle_outdated(curver)\n elif as_json:\n msg = (\"Warning: 'config_version' absent from config; \"\n \"use --force to try loading anyway\")\n raise UserWarning(msg)\n if not validate(loaded, as_json):\n return\n self.config = construct_config(loaded)\n return self.manage_config(\"save\")\n elif action == \"export\":\n try:\n peeled = self.manage_config(\"view\", peel=True)\n except Exception:\n if not force:\n raise\n else:\n # \"Emergency\" backup called by OnShutdown(); must peel,\n # unfortunately, since construct_config likely just failed\n as_json = peel = True\n strung = self.nv[nvid]\n import json\n peeled = json.loads(strung)\n version = peeled[\"settings\"][\"config_version\"]\n path = os.path.dirname(path)\n path = os.path.join(path, f\"config.{version}.json.bak\")\n if not force:\n if not peeled:\n msg = (\"Warning: 
cached config is empty; \"\n \"use --force to export default config\")\n raise UserWarning(msg)\n if not validate(peeled):\n return\n with open(path, \"w\") as flow:\n if as_json:\n # No need to support \"complete\" (redundant) version\n if not peel:\n spread = self.config.conditions.spread\n payload = self.manage_config(\"view\", peel=spread)\n else:\n payload = peeled\n ensure_defver(payload)\n import json\n json.dump(payload, flow, indent=2)\n else:\n from Signal.iniquitous import gen_ini\n formatted = gen_ini(self.config)\n flow.write(formatted)",
"def configuration():",
"def configuration_view(project):\n project_query = Project.select().where(Project.slug == project).first()\n if project_query is None:\n flash(\"invalid project\")\n return redirect(url_for(\"projects\"))\n session[\"project\"] = project_query\n\n g.selected_tab = \"configuration\"\n\n settings = None\n if request.method == \"GET\":\n settings = Anemone.abcfile.parse(path(project_query.path, \"build.abc\"))\n elif request.method == \"POST\":\n configuration_post(project_query, request)\n\n return render_template(\"configure.html\", ssh=open(app.config[\"SSH_PUBLIC\"]).readline(),\n build=settings, unity=app.config[\"UNITY_PATH\"])",
"def configs(self):\n raise NotImplementedError()",
"def get_config():\n\n return json.loads(CONFIG_FILE.read_text())",
"def config(self) -> Dict[str, Any]:",
"def config():\n\tglobal INPUT_CHUNK_LENGTH, VIDEO_SAVE_PATH, AUDIO_SAVE_PATH, TRANSCRIPT_SAVE_PATH, JSON_SAVE_PATH, FINAL_SAVE_PATH\n\n\twith open('config.json', 'r') as json_data_file:\n\t\tconfig = json.load(json_data_file)\n\n\t\tINPUT_CHUNK_LENGTH = config[\"input\"][\"INPUT_CHUNK_LENGTH\"]\n\t\tVIDEO_SAVE_PATH = config[\"input\"][\"VIDEO_SAVE_PATH\"]\n\t\tAUDIO_SAVE_PATH = config[\"input\"][\"AUDIO_SAVE_PATH\"]\n\t\tTRANSCRIPT_SAVE_PATH = config[\"intermediate\"][\"TRANSCRIPT_SAVE_PATH\"]\n\t\tJSON_SAVE_PATH = config[\"intermediate\"][\"JSON_SAVE_PATH\"]\n\t\tFINAL_SAVE_PATH = config[\"final\"][\"FINAL_SAVE_PATH\"]\n\n\t\tDEBUGGING = config[\"debug_mode\"]\n\t\tCHUNKS_PER_MINUTE = int(60//(INPUT_CHUNK_LENGTH/1000))",
"def get_config_on_json(self):\n # load section CONFIG from data\n try:\n return self.json_data[\"CONFIG\"]\n except:\n constant.get_error(constant.ERROR_004)",
"def app_config_cli(**kwargs):\n\n @signals.config_complete.connect\n def set_config(app):\n app.config.update(kwargs)\n\n yield",
"def get_config(self, view = None):\n return self._get_config(\"config\", view)",
"def config():\n return Config()",
"def config():\n return Config()",
"def config(self):\n return None",
"def config( **kwargs ):",
"def config(ctx):\n return"
] |
[
"0.7127012",
"0.64928466",
"0.64928466",
"0.6352412",
"0.630213",
"0.630213",
"0.6027477",
"0.600231",
"0.59771115",
"0.59740865",
"0.591125",
"0.58834666",
"0.58547175",
"0.58471215",
"0.58188564",
"0.5816865",
"0.5796275",
"0.57883906",
"0.5772668",
"0.5765284",
"0.57640326",
"0.5763944",
"0.5758604",
"0.5753457",
"0.5752769",
"0.57501143",
"0.57501143",
"0.5746446",
"0.57241386",
"0.57224345"
] |
0.78986704
|
0
|
Compute novatel checksum. Expects a StringIO with a size that is a multiple of four bytes.
|
def _checksum(cls, buff):
    """Compute the Novatel checksum; buff must hold a multiple of four bytes."""
    checksum = 0
    while True:
        data = buff.read(cls.checksum_struct.size)
        if len(data) == 0:
            break
        if len(data) < 4:
            raise ValueError("Checksum data length is not a multiple of 4: %d" % len(data))
        c1, c2 = cls.checksum_struct.unpack(data)
        checksum += c1 + c2
    return checksum % 65536  # keep the low 16 bits
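
A standalone sketch of the same 16-bit summation, assuming checksum_struct is struct.Struct("<HH") (two little-endian uint16 per 4-byte chunk); the real class attribute is not shown in the snippet above. The function name novatel_checksum and the sample buffer are illustrative.

import io
import struct

CHECKSUM_STRUCT = struct.Struct("<HH")  # assumed format: two little-endian uint16 per chunk


def novatel_checksum(buff):
    """Sum 16-bit words read four bytes at a time and keep the low 16 bits."""
    checksum = 0
    while True:
        data = buff.read(CHECKSUM_STRUCT.size)
        if not data:
            break
        if len(data) < CHECKSUM_STRUCT.size:
            raise ValueError("Checksum data length is not a multiple of 4: %d" % len(data))
        c1, c2 = CHECKSUM_STRUCT.unpack(data)
        checksum += c1 + c2
    return checksum % 65536


# Words 1, 2, 3, 4 -> 1 + 2 + 3 + 4 = 10
print(novatel_checksum(io.BytesIO(b"\x01\x00\x02\x00\x03\x00\x04\x00")))

The sketch reads from io.BytesIO because struct.unpack expects bytes on Python 3, even though the description above mentions StringIO.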
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_checksum(size1, size2, lines, tmpdir):\n fp = tmpdir.join(\"temp-data.txt\").strpath\n data = \"\\n\".join(lines)\n with open(fp, 'w') as f:\n f.write(data)\n exp = hashlib.new(\"md5\", data.encode(\"utf-8\")).hexdigest()\n res1 = checksum(fp, size1)\n res2 = checksum(fp, size2)\n assert exp == res1\n assert res1 == res2\n assert res2 == exp",
"def checksum(self):\n hasher = md5()\n with self.open('rb') as fd:\n buf = fd.read(_BLOCKSIZE)\n while len(buf) > 0:\n # TODO Could cancel work here.\n hasher.update(buf)\n buf = fd.read(_BLOCKSIZE)\n digest = safetype(hasher.hexdigest())\n return digest",
"def calculate_crc(chunk):\n\n return ensure_crc(crc16.crc16xmodem(chunk))",
"def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF",
"def compute_checksum(filename):\n cmd = 'md5sum ' + filename\n return pipe(cmd)",
"def doChecksum(line):\n return sum(map(int, filter(lambda c: c >= '0' and c <= '9', line[:-1].replace('-','1')))) % 10",
"def getChecksum(self, s):\n \n chksum = 0\n for ch in s:\n chksum = chksum + ord(ch)\n \n return hex(chksum%256)[2:]",
"def checksum_of(filepath):\n bfsz = 10240000 # 10 MB buffer\n sum = hashlib.sha256()\n with open(filepath) as fd:\n while True:\n buf = fd.read(bfsz)\n if not buf: break\n sum.update(buf)\n return sum.hexdigest()",
"def _checksum_compute(content, seed=0):\n csum = seed\n chunks = _chunkify(content, 4)\n for chunk in chunks:\n if len(chunk) == 4:\n ul = chunk[0]\n ul |= chunk[1] << 8\n ul |= chunk[2] << 16\n ul |= chunk[3] << 24\n else:\n # WTF: I can only assume this is a typo from the original\n # author of the cabinet file specification\n if len(chunk) == 3:\n ul = (chunk[0] << 16) | (chunk[1] << 8) | chunk[2]\n elif len(chunk) == 2:\n ul = (chunk[0] << 8) | chunk[1]\n elif len(chunk) == 1:\n ul = chunk[0]\n csum ^= ul\n return csum",
"def checksum(n):\n return zlib.crc32(n.to_bytes(int(math.log2(n)), \"big\"))",
"def checksum(payload):\n return hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]",
"def calc_checksum(self):\n total = 0\n packet = ipv4(self.bytes)\n packet.checksum = 0\n bytes = packet.bytes\n if len(bytes) % 2 == 1:\n bytes += \"\\0\"\n for i in range(len(bytes)/2):\n total += (struct.unpack(\"!H\", bytes[2*i:2*i+2])[0])\n total = (total >> 16) + (total & 0xffff)\n total += total >> 16\n return ~total",
"def ComputeChecksum(payload_bytes: bytes) -> bytes:\n return BytesUtils.Reverse(XModemCrc.QuickDigest(payload_bytes))",
"def _get_checksum(self, arg):",
"def get_md5(data):\n if hasattr(data, \"read\") and hasattr(data, 'seek'):\n data.seek(0)\n m = md5()\n chunk = data.read(1024*1024) # 1Mb\n f_size = 0\n while(chunk):\n f_size += len(chunk)\n m.update(chunk)\n chunk = data.read(1024*1024)\n data.seek(0)\n return m.hexdigest(), f_size\n else: # normal str\n m = md5()\n f_size = len(data)\n m.update(data)\n return m.hexdigest(), f_size",
"def calc_checksum(self, segment: bytes) -> int:\n if len(segment) % 2 == 1: # padding\n segment += b'\\x00'\n strarr = array.array('H', segment) # split into 16-bit substrings\n cksum = sum(strarr) # sum\n cksum = (cksum >> 16) + (cksum & 0xffff) # carry\n cksum += (cksum >> 16) # carry in case of spill\n cksum = ~cksum & 0xffff # 1's complement\n return cksum",
"def calcChecksum(self, data, length):\n checksum = 0\n\n for i in range(length//2):\n checksum = checksum ^ (data[i*2] | (data[i*2+1] << 8)) #xor-ing\n return 0xffff & (checksum ^ 0xffff) #inverting",
"def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')",
"def get_checksum(input_fname):\n with open(input_fname, \"rb\") as infile:\n file_contents = infile.read()\n\n checksum = hashlib.md5(file_contents).hexdigest()\n return checksum",
"def tcp_checksum_calc(src: bytes, dst: bytes, proto: int, payload: bytes) -> bytes:\n _sum = dpkt.struct.pack(\">4s4sxBH\", src, dst, proto, len(payload))\n _sum = dpkt.in_cksum_add(0, _sum)\n _sum = dpkt.in_cksum_add(_sum, payload)\n _sum = dpkt.in_cksum_done(_sum)\n return _sum",
"def calculate_checksum(self, data):\n\t\tdata = data[2:] # Ignore start tokens ($$)\n\t\tcrc16 = crcmod.predefined.mkCrcFun('crc-ccitt-false')\n\t\treturn hex(crc16(data))[2:].upper().zfill(4)",
"def do_checksum(source_string):\n sum = 0\n max_count = 3\n count = 0\n while count < max_count:\n val = ord(source_string[count + 1]) * 256 + ord(source_string[count])\n sum = sum + val\n sum = sum & 0xffffffff\n count = count + 2\n if max_count < len(source_string):\n sum = sum + ord(source_string[len(source_string) - 1])\n sum = sum & 0xffffffff\n\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n answer = answer >> 8 | (answer << 8 & 0xff00)\n print(answer)\n return answer",
"def _hash(fn, buffer: Union[io.StringIO, io.BytesIO]):\n\n buffer.seek(0)\n hashsum = fn()\n for chunk in iter(lambda: buffer.read(4096), b''):\n hashsum.update(chunk)\n return hashsum.hexdigest()",
"def md5checksum(file_name):\n from hashlib import md5\n hash_md5 = md5()\n with open(file_name, \"rb\") as f:\n for chunk in iter(lambda: f.read(32768), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()",
"def Checksum(self, default=None):\n return self.data.get('checksum', default)",
"def file_checksum(file_path, block_size=65536):\n path = Path(file_path)\n h = xxhash.xxh64()\n with path.open(\"rb\") as f:\n for chunk in iter(lambda: f.read(block_size), b\"\"):\n h.update(chunk)\n return h.hexdigest()",
"def getChecksum(dataString):\n sum = 0\n count_to = (len(dataString) / 2) * 2\n count = 0\n while count < count_to:\n this_val = ord(dataString[count + 1])*256+ord(dataString[count])\n sum = sum + this_val\n sum = sum & 0xffffffff # Necessary?\n count = count + 2\n if count_to < len(dataString):\n sum = sum + ord(dataString[len(dataString) - 1])\n sum = sum & 0xffffffff # Necessary?\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n # Swap bytes. Bugger me if I know why.\n answer = answer >> 8 | (answer << 8 & 0xff00)\n return answer",
"def checksumFile(filename):\n return md5File(filename)",
"def correct_checksum():\n test_strs = [\"ch3ck1nG c0rr3ct ch3cksu|\\/|\\n\", \"y3T an0th3r str1ng0_x\\/.!&\\n\"]\n\n def test_checksum(test_str):\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n ref_checksum = ref_segment.checksum\n\n # Check the first sent segment.\n segment = segments[0]\n\n # Checksum equal to the reference checksum.\n if segment.checksum == ref_checksum:\n return True\n\n # Maybe they also set an ACK for this segment. Compare with the computed\n # checksum.\n return int(segment.checksum, 16) == segment.c_repr.cksum;\n\n return reduce(lambda a, b: a and b, [test_checksum(t) for t in test_strs])",
"def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()"
] |
[
"0.6441798",
"0.64138716",
"0.61732674",
"0.60550714",
"0.60465926",
"0.59932405",
"0.59910274",
"0.5981081",
"0.5977071",
"0.59749424",
"0.5952695",
"0.5949494",
"0.5903017",
"0.585889",
"0.58151466",
"0.57396185",
"0.56941354",
"0.5688313",
"0.5686545",
"0.565836",
"0.56320965",
"0.55984056",
"0.559288",
"0.5588203",
"0.55855936",
"0.5561184",
"0.5553262",
"0.5551326",
"0.55436414",
"0.55374056"
] |
0.71015364
|
0
|
GET method, returns department collection.
|
def get():
logger.debug('Catch GET request by URL /api/departments.')
departments = ds.get_all()
return marshal_departments(departments)
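
The helpers used above (ds, marshal_departments) are project-specific and not shown; a plausible sketch of the marshalling helper with Flask-RESTful, using illustrative field names rather than the project's actual schema, might look like:

from flask_restful import fields, marshal

# Illustrative output schema; the project's real fields may differ.
DEPARTMENT_FIELDS = {
    "id": fields.Integer,
    "name": fields.String,
    "email": fields.String,
    "average_salary": fields.Float,
}


def marshal_departments(departments):
    """Serialise one Department model instance or a list of them."""
    return marshal(departments, DEPARTMENT_FIELDS)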
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(id_):\n\n logger.debug('Catch GET request by URL /api/departments/%i.', id_)\n try:\n department = ds.get(id_)\n if not department.id:\n raise Exception\n except Exception:\n logger.error('There is no department with id %i', id_)\n return {'message': f'There is no department with {id_}.'}, 404\n return marshal_departments(department)",
"def get_departments() -> list:\n return Department.query.all()",
"def get(self, department_id):\n department = get_department_by_id(department_id)\n employees = set_employees_by_id(department_id)\n department_obj = {\n \"id\": department.id,\n \"name\": department.name,\n \"employees\": [\n {\n \"id\": employee.id,\n \"name\": employee.name,\n \"salary\": employee.salary,\n }\n for employee in employees\n ],\n }\n return department_obj, 200",
"async def getDepartments(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getDepartments()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def departments(department_name=None):\n\tif not department_name:\n\t\tdepartment_data = _serialize_list(Department.query.all(), backrefs=[\"employees\"])\n\t\tdepartment_data = {'departments': department_data, 'total': len(department_data)}\n\telse:\n\t\tdepartment_data = _serialize_model(Department.query.filter_by(name=department_name).first(), backrefs=[\"employees\"])\n\n\treturn jsonify(department_data)",
"def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)",
"def get_departments(self) -> list:\n return self.client.departments.get_all()",
"def departments():\n # gather data from db about all departments\n return render_template(\"departments.html\")",
"def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)",
"def get(self, request, department):\n try:\n dep = DepartmentsModel.objects.get(name__iexact=department)\n except DepartmentsModel.DoesNotExist:\n return HttpResponse('Department not found')\n result = {'department': dep.name,\n 'questions': list(),\n 'grades': list(),\n 'stages': list(),\n 'sections': list()}\n\n for grade in list(GradesModel.objects.all()):\n result['grades'].append({'name': grade.name})\n\n for question in list(dep.questions.order_by('id').all()):\n result['questions'].append({'name': question.name, 'stages': question.f_stage.name, 'hint': question.hint})\n result['stages'].append({'name': question.f_stage.name, 'section': question.f_stage.f_section.name})\n result['sections'].append({'name': question.f_stage.f_section.name})\n\n # Remove duplicates from dict\n result['stages'] = [dict(names) for names in set(tuple(item.items()) for item in result['stages'])]\n result['sections'] = [dict(names) for names in set(tuple(item.items()) for item in result['sections'])]\n return JsonResponse(result)",
"def get_department_by_id(department_id):\n return Department.query.get(department_id)",
"def get_departments():\n\n term = '201931' # Get current term from somewhered\n maxCount = 300\n\n # Call getsubjects\n params = {\n 'dataType': 'json',\n 'term': term,\n 'offset': 1,\n 'max': maxCount\n }\n\n r = requests.get(BASE_URL, params=params)\n\n json = ''\n # Attempt to convert it to JSON\n try:\n json = r.json()\n except:\n print('Error converting depts to JSON')\n\n return json",
"async def getApplicationDepartmentListing(self, page_no=None, page_size=None, q=None):\n payload = {}\n \n if page_no:\n payload[\"page_no\"] = page_no\n \n if page_size:\n payload[\"page_size\"] = page_size\n \n if q:\n payload[\"q\"] = q\n \n\n # Parameter validation\n schema = CatalogValidator.getApplicationDepartmentListing()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. Default is 12.\",\"schema\":{\"type\":\"integer\",\"default\":12},\"required\":false},{\"in\":\"query\",\"name\":\"q\",\"description\":\"Search query with brand name.Use this parameter to search department by name.\",\"schema\":{\"type\":\"string\"},\"required\":false}],\"query\":[{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. Default is 12.\",\"schema\":{\"type\":\"integer\",\"default\":12},\"required\":false},{\"in\":\"query\",\"name\":\"q\",\"description\":\"Search query with brand name.Use this parameter to search department by name.\",\"schema\":{\"type\":\"string\"},\"required\":false}],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", page_no=page_no, page_size=page_size, q=q)\n query_string = await create_query_string(page_no=page_no, page_size=page_size, q=q)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department\", page_no=page_no, page_size=page_size, q=q), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def department(self) -> object:\n return self._department",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n employees = Employee.query.filter_by(department_id=department_id)\n return render_template('department/department.html',\n department=department_obj, employees=employees)",
"def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())",
"def get_dep():\n # return jsonify([i.serialize for i in Department.query.all()])\n return jsonify(\n [i[0] for i in Book_view.query.with_entities(Book_view.dep_code).distinct().order_by(Book_view.dep_code)])",
"def departments(request):\n if 'selected_package' in request.session:\n del request.session['selected_package']\n assert isinstance(request, HttpRequest)\n status, result = api.show_departments()\n return render(\n request,\n 'app/departments.html',\n {\n 'title': 'แผนกและแพ็คเกจ',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )",
"def show_all_departments():\n\n logger.debug('Function show_all_departments(). Routed to /departments')\n titles = ['Name', 'Average Salary', 'Employees']\n departments = ds.get_all()\n logger.info('Get list of departments, length is %i', len(departments))\n return render_template('departments.html',\n title='Departments',\n table_title='List of Departments',\n headers=titles,\n departments=departments)",
"def get_all_departments(self):\n sql = 'SELECT name FROM department'\n try:\n self.control.execute(sql)\n except Error:\n print(f\"[X] {Error}\")\n return 1\n answer = {}\n counter = 1\n for i in self.control.fetchall():\n answer.update(\n {\n counter: str(i[0]).replace(\"'\", \"\")\n }\n )\n counter += 1\n return answer",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def list_departments():\n \t check_admin()\n\n #check all the departments in the database and assign them to a variable.departments \n \t departments = Department.query.all()\n\n \t return render_template('admin/departments/departments.html',departments = departments,title = \"Departments\")",
"def department(self):\n if \"department\" in self._prop_dict:\n return self._prop_dict[\"department\"]\n else:\n return None",
"def department(self):\n if \"department\" in self._prop_dict:\n return self._prop_dict[\"department\"]\n else:\n return None",
"def getDepartmentDictionary( self ):\n return DepartmentDictionary.departmentDictionary",
"def department():\n # Use Pandas to perform the sql query\n stmt = db.session.query(oc_salary_db).statement\n df = pd.read_sql_query(\"select department from oc_salary group by department\", db.session.bind, coerce_float=False)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df[\"department\"].values))",
"def get_data(department_id):\n url='https://covidstats.com.ar/ws/evolucion?comprimido=1&departamentos[]={}'\n with urllib.request.urlopen(url.format(department_id)) as req:\n return json.loads(req.read().decode())",
"def test_api_can_get_all_departments(self):\n res = self.client().get(service_url)\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))\n self.assertIn('dep 2', str(res.data))\n self.assertIn('dep 3', str(res.data))",
"async def getCategories(self, department=None):\n payload = {}\n \n if department:\n payload[\"department\"] = department\n \n\n # Parameter validation\n schema = CatalogValidator.getCategories()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/categories\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[{\"in\":\"query\",\"name\":\"department\",\"description\":\"The name of the department. Use this parameter to filter products by a particular department. See below the list of available departments. You can retrieve available departments from the **v1.0/departments/** API\",\"schema\":{\"type\":\"string\",\"enum\":[\"baby-care-kids-essentials\",\"beauty-personal-care\",\"home-living\",\"kids\",\"men\",\"others\",\"toys\",\"women\"]},\"required\":false}],\"query\":[{\"in\":\"query\",\"name\":\"department\",\"description\":\"The name of the department. Use this parameter to filter products by a particular department. See below the list of available departments. You can retrieve available departments from the **v1.0/departments/** API\",\"schema\":{\"type\":\"string\",\"enum\":[\"baby-care-kids-essentials\",\"beauty-personal-care\",\"home-living\",\"kids\",\"men\",\"others\",\"toys\",\"women\"]},\"required\":false}],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", department=department)\n query_string = await create_query_string(department=department)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/categories\", department=department), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")"
] |
[
"0.7557787",
"0.7532816",
"0.71916354",
"0.7127789",
"0.69939005",
"0.69594824",
"0.69122684",
"0.6909003",
"0.6907418",
"0.68410194",
"0.67590916",
"0.67334664",
"0.6722613",
"0.67003804",
"0.66987646",
"0.66958904",
"0.6678866",
"0.6579981",
"0.6577867",
"0.65603775",
"0.6537399",
"0.6381583",
"0.637039",
"0.63696843",
"0.63696843",
"0.63333267",
"0.61940634",
"0.6171834",
"0.6168319",
"0.6142436"
] |
0.79613036
|
0
|
POST method, adds new department.
|
def post():
logger.debug('Catch POST request by URL /api/departments.')
args = department_args.parse_args()
try:
id_ = ds.add(name=args['name'], email=args['email'])
created_department = ds.get(id_)
except IntegrityError:
return {'message': f"Department with name {args['name']} already "
"exists."}, 404
except Exception:
return {'message': "Can't post department."}, 404
return marshal_departments(created_department), 201
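
A plausible definition of the department_args parser used above, assuming Flask-RESTful's reqparse; the project's actual parser is defined elsewhere and may differ.

from flask_restful import reqparse

department_args = reqparse.RequestParser()
department_args.add_argument("name", type=str, required=True,
                             help="Department name is required.")
department_args.add_argument("email", type=str, required=False)

Note that the handler maps a duplicate-name IntegrityError to a 404 response; 409 Conflict (or 400) would be the more conventional status for that case, but the snippet above keeps 404.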
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_department():\n logger.debug('Routed to /departments/add')\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.add(name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t add department with name %s and email \"%s\". '\n 'Exception: %s', name, email, str(exception))\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n return redirect(url_for('department.show_all_departments'))\n\n titles = ['Name', 'E-mail']\n return render_template('add_department.html',\n title='Add department',\n table_title='Adding new department',\n headers=titles)",
"def add_department():\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel(department_name).save()\n return Serializer.serialize(response, 201, \"Department added successfully\")",
"def add_department():\n form = AddDepartment()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_department = Department(name=form.name.data)\n db.session.add(new_department)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Department already exists!', 'warning')\n return redirect(url_for('add_department'))\n\n flash(f'Department {form.name.data} created!', 'success')\n return redirect(url_for('home'))\n\n flash('Name not defined.', 'warning')\n return render_template('department/department_add.html', form=form)",
"def add_department():\r\n check_admin()\r\n\r\n add_department = True\r\n\r\n form = DepartmentForm()\r\n if form.validate_on_submit():\r\n department = Department(name=form.name.data,\r\n description=form.description.data)\r\n try:\r\n # add department to the database\r\n db.session.add(department)\r\n db.session.commit()\r\n flash('You have successfully added a new department.')\r\n except:\r\n # in case department name already exists\r\n flash('Error: department name already exists.',category='error')\r\n\r\n # redirect to departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n # load department template\r\n return render_template('admin/departments/department.html', action=\"Add\",\r\n add_department=add_department, form=form,\r\n title=\"Add Department\")",
"def add_department():\n\tcheck_admin()\n\n\tadd_department = True\n\n\tform = DepartmentForm()\n\tif form.validate_on_submit():\n\t\tdepartment = Department(name=form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add department to the database\n\t\t\tdb.session.add(department)\n\t\t\tdb.session.commit()\n\t\t\tflash(\"You have successsfully added a new department.\")\n\t\texcept:\n\t\t\t#incase the department already exists\n\t\t\tflash(\"Error: department already exists.\")\n\t#once the admin creates a new department,they will be redirected to the departments page\n\treturn render_template('admin/departments/department.html',action=\"Add\", add_department= add_department,form=form,title = \"Add Department\")",
"def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))",
"def post(id_=None):\n\n logger.debug('Catch POST request by URL /api/departments/%i.', id_)\n return abort(405)",
"def _insert_department(self):\n # Insert\n if db_department.idx_department_exists(1) is False:\n record = Department(\n code=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1102)",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200",
"def put(self, department_id):\n department = get_department_by_id(department_id)\n department.name = request.json[\"name\"]\n db.session.commit()\n return {}, 200",
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def add_hospital(request):\n if request.POST:\n post = request.POST\n name = post.get(\"name\")\n address = post.get(\"address\")\n city = post.get(\"city\")\n state = post.get(\"state\")\n zip = post.get(\"zip\")\n hospital = Hospital.objects.create(\n name=name,\n address=address,\n city=city,\n state=state,\n zip=zip\n )\n\n if hospital:\n return redirect('add_hospital')\n\n return render(request, 'add_hospital.html')",
"def update_department(id_: int):\n logger.debug('Routed to /departments/%i/update', id_)\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.update(id_, name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t update department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n\n logger.info(\n 'Successfully updated department with id %i. It\\'s name = %s, '\n 'email = %s', id_, name, email)\n return redirect(url_for(\"department.show_department\", id_=id_))\n\n department = None\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't get department with id %i\", id_)\n abort(404)\n\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n logger.info('Get department %s', department.name)\n return render_template('edit_department.html',\n title='Update department',\n table_title=f'Updating department: '\n f'{department.name}',\n headers=titles,\n department=department)",
"def put():\n\n logger.debug('Catch PUT request by URL /api/departments.')\n return abort(405)",
"def update_department(department_id):\n\n form = UpdateDepartment()\n department_obj = Department.query.get_or_404(department_id)\n if request.method == 'POST':\n if form.validate_on_submit():\n department_obj.name = form.name.data\n try:\n db.session.commit()\n except IntegrityError:\n flash('Department with this name already exists.', 'warning')\n db.session.rollback()\n return redirect(url_for('update_department',\n department_id=department_obj.id))\n flash('Department name successfully changed!', 'success')\n return redirect(url_for('home'))\n return render_template('department/department_update.html',\n form=form, department=department_obj)",
"def post(self, request):\n data = request.data\n try:\n career_planning = CareerPlanning(**data)\n career_planning.save()\n LOGGER.info(\"CareerPlanning created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})",
"def post(self):\n # Create deposition (uses default deposition type unless type is given)\n d = Deposition.create(\n current_user,\n request.json['metadata'].get(\n 'upload_type',\n None))\n # Validate input data according to schema\n self.validate_input(d)\n # Process input data\n self.process_input(d)\n # Save if all went fine\n d.save()\n return d.marshal(), 201",
"def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))",
"def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())",
"def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)",
"def addSport():\n\n if request.method == 'GET':\n return render_template('addsport.html')\n elif request.method == 'POST':\n newSport = Sport(\n sportName=request.form['sportName'],\n user_id=login_session['user_id'])\n session.add(newSport)\n session.commit()\n return redirect(url_for('showSports'))",
"def post(self):\n app.logger.info('Request to Create a Pet')\n content_type = request.headers.get('Content-Type')\n if not content_type:\n abort(status.HTTP_400_BAD_REQUEST, \"No Content-Type set\")\n\n data = {}\n # Check for form submission data\n if content_type == 'application/x-www-form-urlencoded':\n app.logger.info('Processing FORM data')\n data = {\n 'name': request.form['name'],\n 'category': request.form['category'],\n 'available': request.form['available'].lower() in ['true', '1', 't']\n }\n elif content_type == 'application/json':\n app.logger.info('Processing JSON data')\n data = request.get_json()\n else:\n message = 'Unsupported Content-Type: {}'.format(content_type)\n app.logger.info(message)\n abort(status.HTTP_400_BAD_REQUEST, message)\n\n pet = Pet()\n try:\n pet.deserialize(data)\n except DataValidationError as error:\n raise BadRequest(str(error))\n pet.save()\n app.logger.info('Pet with new id [%s] saved!', pet.id)\n location_url = api.url_for(PetResource, pet_id=pet.id, _external=True)\n return pet.serialize(), status.HTTP_201_CREATED, {'Location': location_url}",
"def update_department(department_id):\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(\n 400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel().edit_department(department_name,\n department_id)\n if response:\n return Serializer.serialize(response, 200,\n 'Department updated successfully')\n return raise_error(404, \"Department not found\")",
"def add_post(request):\n if 'form.submitted' in request.params:\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('new_post')\n post = Post('')\n return environment_factory(post=post, save_url=save_url)",
"def post(self):\n app.logger.info('Request to Create a Pet')\n content_type = request.headers.get('Content-Type')\n if not content_type:\n abort(status.HTTP_400_BAD_REQUEST, \"No Content-Type set\")\n\n data = {}\n # Check for form submission data\n if content_type == 'application/x-www-form-urlencoded':\n app.logger.info('Processing FORM data')\n app.logger.info(type(request.form))\n app.logger.info(request.form)\n data = {\n 'name': request.form['name'],\n 'category': request.form['category'],\n 'available': request.form['available'].lower() in ['yes', 'y', 'true', 't', '1']\n }\n elif content_type == 'application/json':\n app.logger.info('Processing JSON data')\n data = request.get_json()\n else:\n message = 'Unsupported Content-Type: {}'.format(content_type)\n app.logger.info(message)\n abort(status.HTTP_400_BAD_REQUEST, message)\n\n pet = Pet()\n try:\n pet.deserialize(data)\n except DataValidationError as error:\n raise BadRequest(str(error))\n pet.create()\n app.logger.info('Pet with new id [%s] saved!', pet.id)\n location_url = api.url_for(PetResource, pet_id=pet.id, _external=True)\n return pet.serialize(), status.HTTP_201_CREATED, {'Location': location_url}",
"def post():\n\n title = request.form[\"title\"]\n description = request.form[\"description\"]\n is_valid = request.form[\"is_valid\"]\n company_id = request.form[\"company_id\"]\n city_id = request.form[\"city_id\"]\n start_date = request.form[\"start_date\"]\n add_date = request.form[\"add_date\"]\n sector = request.form[\"sector\"]\n contract_type_id = request.form[\"contract_type_id\"]\n experience = request.form[\"experience\"]\n formation = request.form[\"formation\"]\n try:\n elements = Advertisements().post( title, description, company_id, city_id, start_date, add_date, sector, contract_type_id, experience, formation)\n result = jsonify(elements)\n result.statut_code = 201\n return result\n except Exception as identifier:\n return abort(500, identifier)",
"def department(self, department: object):\n\n self._department = department",
"def post(self):\n try:\n new_form = FORM_SCHEMA.load(request.json).data\n except ValidationError as err:\n APP.logger.error(err.args)\n return err.messages, status.HTTP_400_BAD_REQUEST\n\n add_new_form = Form(**new_form)\n DB.session.add(add_new_form)\n\n try:\n DB.session.commit()\n except IntegrityError as err:\n APP.logger.error(err.args)\n DB.session.rollback()\n return {'error': 'Already exists.'}, status.HTTP_400_BAD_REQUEST\n return Response(status=status.HTTP_201_CREATED)",
"def post(self):\n teacher = self.request.get(\"teacher\")\n temail = self.request.get(\"temail\")\n tphone = self.request.get(\"tphone\")\n specialty = self.request.get(\"specialty\")\n\n if teacher and temail and tphone and specialty:\n\n #create a new teacher object and store it in the database\n teacher = Teacher(\n teacher=teacher,\n temail=temail,\n tphone=tphone, \n specialty=specialty)\n teacher.put()\n\n id = teacher.key().id()\n self.redirect(\"/teacher/%s\" % id)\n else:\n error = \"Please include a teacher, an email, a phone number, and a specialty.\"\n self.render_form(teacher, temail, tphone, specialty, error)"
] |
[
"0.83515495",
"0.8286092",
"0.80750674",
"0.78898543",
"0.78584856",
"0.7276642",
"0.71313643",
"0.6963064",
"0.66448486",
"0.6494299",
"0.6454834",
"0.63422024",
"0.6288573",
"0.62164277",
"0.62058616",
"0.59953094",
"0.592722",
"0.58748484",
"0.5860363",
"0.58289534",
"0.5751742",
"0.5711168",
"0.5704056",
"0.57022166",
"0.56909925",
"0.5654393",
"0.56522775",
"0.5648818",
"0.5647386",
"0.56329817"
] |
0.84991974
|
0
|
GET method, returns certain department by id.
|
def get(id_):
logger.debug('Catch GET request by URL /api/departments/%i.', id_)
try:
department = ds.get(id_)
if not department.id:
raise Exception
except Exception:
logger.error('There is no department with id %i', id_)
        return {'message': f'There is no department with id {id_}.'}, 404
return marshal_departments(department)
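
These handlers follow the Flask-RESTful Resource style (tuple returns of payload and status code). A hypothetical registration of the collection and item endpoints, with invented class names, might look like the following; the real routing is not shown in the snippets above.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)


class DepartmentListResource(Resource):
    """Would hold the collection-level get()/post() handlers shown earlier."""


class DepartmentResource(Resource):
    """Would hold the item-level get(id_)/put(id_)/delete(id_) handlers."""


api.add_resource(DepartmentListResource, "/api/departments")
api.add_resource(DepartmentResource, "/api/departments/<int:id_>")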
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)",
"def get_department_by_id(department_id):\n return Department.query.get(department_id)",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n employees = Employee.query.filter_by(department_id=department_id)\n return render_template('department/department.html',\n department=department_obj, employees=employees)",
"def get(self, department_id):\n department = get_department_by_id(department_id)\n employees = set_employees_by_id(department_id)\n department_obj = {\n \"id\": department.id,\n \"name\": department.name,\n \"employees\": [\n {\n \"id\": employee.id,\n \"name\": employee.name,\n \"salary\": employee.salary,\n }\n for employee in employees\n ],\n }\n return department_obj, 200",
"def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())",
"def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)",
"def get_data(department_id):\n url='https://covidstats.com.ar/ws/evolucion?comprimido=1&departamentos[]={}'\n with urllib.request.urlopen(url.format(department_id)) as req:\n return json.loads(req.read().decode())",
"def get_dessert_by_id(dessert_id: int):\n return get_data_by_id(\"Desserts\", dessert_id)",
"def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)",
"def get():\n\n logger.debug('Catch GET request by URL /api/departments.')\n departments = ds.get_all()\n return marshal_departments(departments)",
"def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person",
"def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person",
"def update_department(id_: int):\n logger.debug('Routed to /departments/%i/update', id_)\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.update(id_, name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t update department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n\n logger.info(\n 'Successfully updated department with id %i. It\\'s name = %s, '\n 'email = %s', id_, name, email)\n return redirect(url_for(\"department.show_department\", id_=id_))\n\n department = None\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't get department with id %i\", id_)\n abort(404)\n\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n logger.info('Get department %s', department.name)\n return render_template('edit_department.html',\n title='Update department',\n table_title=f'Updating department: '\n f'{department.name}',\n headers=titles,\n department=department)",
"def set_department_by_id(department_id):\n return Department.query.filter(id=department_id).one()",
"def get(id=None):\n return requests.get(\"/{}\".format(id))",
"def get_doctor_detail(id):\n headers = {\"Authorization\": 'Bearer ' + api_key}\n return requests.get(\n f\"{base_url}businesses/{id}\",\n headers=headers).json()",
"def get(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_dict = slip.to_dict()\n slip_dict['departure_history'] = {}\n slip_dict['departure_history']['departure_date'] = slip.departure_date\n slip_dict['departure_history']['departed_boat'] = slip.departed_boat\n del slip_dict['departed_boat'], slip_dict['departure_date']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))",
"def delete(id_=None):\n\n logger.debug('Catch DELETE request by URL /api/departments/%i.', id_)\n ds.delete(id_)\n return '', 204",
"def doctor(request):\n assert isinstance(request, HttpRequest)\n if request.method == 'POST':\n request.session['selected_package'] = request.POST['package_id']\n request.session['selected_doctor'] = request.POST['doctor_id']\n return redirect('/doctor-detail/')\n status, result = api.show_doctor_in_department()\n # print(result)\n return render(\n request,\n 'app/doctor.html',\n {\n 'title': 'แผนกและแพทย์',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )",
"def delete_department(id):\r\n check_admin()\r\n\r\n department = Department.query.get_or_404(id)\r\n db.session.delete(department)\r\n db.session.commit()\r\n flash('You have successfully deleted the department.')\r\n\r\n # redirect to the departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n return render_template(title=\"Delete Department\")",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def getbyid(self, id):\n\n return esd.retrieve(id)",
"def departments(request):\n if 'selected_package' in request.session:\n del request.session['selected_package']\n assert isinstance(request, HttpRequest)\n status, result = api.show_departments()\n return render(\n request,\n 'app/departments.html',\n {\n 'title': 'แผนกและแพ็คเกจ',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )",
"def get(self, id):\n\n teacher = Teacher.get_by_id(int(id))\n if teacher:\n t = jinja_env.get_template(\"teacher.html\")\n response = t.render(teacher=teacher)\n else:\n error = \"there is no teacher with id %s\" % id\n t = jinja_env.get_template(\"404.html\")\n response = t.render(error=error)\n\n self.response.out.write(response)",
"def get_doctor(id):\n doctor = Doctor.query.get(id)\n result = doctor_schema.dump(doctor)\n return jsonify(result.data)",
"def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200",
"def departments():\n # gather data from db about all departments\n return render_template(\"departments.html\")",
"async def getDepartments(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getDepartments()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def get_departments() -> list:\n return Department.query.all()"
] |
[
"0.84085786",
"0.7923413",
"0.7799314",
"0.762513",
"0.7419371",
"0.74175876",
"0.7370106",
"0.6990669",
"0.6760814",
"0.6547017",
"0.6442252",
"0.6337737",
"0.63375086",
"0.6291892",
"0.62416136",
"0.6226284",
"0.62131727",
"0.62112105",
"0.6149921",
"0.60447145",
"0.59633994",
"0.59350663",
"0.59209937",
"0.58993226",
"0.5850854",
"0.58481026",
"0.5833897",
"0.57800543",
"0.57782316",
"0.57491684"
] |
0.8391516
|
1
|
PUT method, updates an existing department by id.
|
def put(id_=None):
    logger.debug('Catch PUT request by URL /api/departments/%i.', id_)
    try:
        args = department_args.parse_args()
        ds.update(id_, name=args['name'], email=args['email'])
    except Exception:
        return {'message': "Can't update department."}, 404
    return marshal_departments(ds.get(id_)), 200
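
For context, a minimal sketch (not taken from the source) of how the department_args parser consumed by put() could be defined with Flask-RESTful's reqparse; the field names simply mirror the ones used above:

from flask_restful import reqparse

# Hypothetical parser definition; the real service may configure it differently.
department_args = reqparse.RequestParser()
department_args.add_argument('name', type=str, required=True,
                             help='Department name is required.')
department_args.add_argument('email', type=str, required=True,
                             help='Department e-mail is required.')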
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def put(self, department_id):\n department = get_department_by_id(department_id)\n department.name = request.json[\"name\"]\n db.session.commit()\n return {}, 200",
"def update_department(department_id):\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(\n 400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel().edit_department(department_name,\n department_id)\n if response:\n return Serializer.serialize(response, 200,\n 'Department updated successfully')\n return raise_error(404, \"Department not found\")",
"def update_department(id_: int):\n logger.debug('Routed to /departments/%i/update', id_)\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.update(id_, name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t update department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n\n logger.info(\n 'Successfully updated department with id %i. It\\'s name = %s, '\n 'email = %s', id_, name, email)\n return redirect(url_for(\"department.show_department\", id_=id_))\n\n department = None\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't get department with id %i\", id_)\n abort(404)\n\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n logger.info('Get department %s', department.name)\n return render_template('edit_department.html',\n title='Update department',\n table_title=f'Updating department: '\n f'{department.name}',\n headers=titles,\n department=department)",
"def update_department(department_id):\n\n form = UpdateDepartment()\n department_obj = Department.query.get_or_404(department_id)\n if request.method == 'POST':\n if form.validate_on_submit():\n department_obj.name = form.name.data\n try:\n db.session.commit()\n except IntegrityError:\n flash('Department with this name already exists.', 'warning')\n db.session.rollback()\n return redirect(url_for('update_department',\n department_id=department_obj.id))\n flash('Department name successfully changed!', 'success')\n return redirect(url_for('home'))\n return render_template('department/department_update.html',\n form=form, department=department_obj)",
"def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())",
"def set_department_by_id(department_id):\n return Department.query.filter(id=department_id).one()",
"def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))",
"def put():\n\n logger.debug('Catch PUT request by URL /api/departments.')\n return abort(405)",
"def update_doctor(id):\n doctor = Doctor.query.get(id)\n doctor.calendar_id = request.json['calendar_id']\n db.session.commit()\n return doctor_schema.jsonify(doctor)",
"def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )",
"async def updateAppDepartment(self, department_uid=None, body=\"\"):\n payload = {}\n \n if department_uid:\n payload[\"department_uid\"] = department_uid\n \n\n # Parameter validation\n schema = CatalogValidator.updateAppDepartment()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import ApplicationDepartmentJson\n schema = ApplicationDepartmentJson()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department/{department_uid}\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"Id of the company associated to department custom json.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"application id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"department_uid\",\"description\":\"department id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"Id of the company associated to department custom json.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"application id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"department_uid\",\"description\":\"department id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", department_uid=department_uid)\n query_string = await create_query_string(department_uid=department_uid)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"PATCH\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"patch\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department/{department_uid}\", department_uid=department_uid), query_string, headers, body, exclude_headers=exclude_headers), data=body)",
"def department_id(self, department_id):\n if self.local_vars_configuration.client_side_validation and department_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `department_id`, must not be `None`\") # noqa: E501\n\n self._department_id = department_id",
"def department(self, department: object):\n\n self._department = department",
"def put(self, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n self.validate_input(d, draft_id)\n self.process_input(d, draft_id)\n d.save()",
"def delete(self, department_id):\n department = get_department_by_id(department_id)\n db.session.delete(department)\n db.session.commit()\n return {}, 204",
"def department(self, department):\n\n self._department = department",
"def department(self, department):\n\n self._department = department",
"def department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n employees = Employee.query.filter_by(department_id=department_id)\n return render_template('department/department.html',\n department=department_obj, employees=employees)",
"def delete_department(id):\r\n check_admin()\r\n\r\n department = Department.query.get_or_404(id)\r\n db.session.delete(department)\r\n db.session.commit()\r\n flash('You have successfully deleted the department.')\r\n\r\n # redirect to the departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n return render_template(title=\"Delete Department\")",
"def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)",
"def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)",
"def put(self, uuid: str):\n try:\n employee = self.service.update_employee(\n self.schema, uuid, request.json\n )\n except ValidationError as error:\n return error.messages, 400\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200",
"def delete_department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n db.session.delete(department_obj)\n db.session.commit()\n flash(f'Department {department_obj.name} successfully deleted.', 'success')\n return redirect(url_for('home'))",
"def put(self, pet_id):\n app.logger.info('Request to Update a pet with id [%s]', pet_id)\n #check_content_type('application/json')\n pet = Pet.find(pet_id)\n if not pet:\n abort(status.HTTP_404_NOT_FOUND, \"Pet with id '{}' was not found.\".format(pet_id))\n\n payload = request.get_json()\n try:\n pet.deserialize(payload)\n except DataValidationError as error:\n raise BadRequest(str(error))\n\n pet.id = pet_id\n pet.update()\n return pet.serialize(), status.HTTP_200_OK",
"def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)",
"def put(self, id):\n return Contacts().update_one(id, request.json)",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def update(self, id, obj):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('put', url, data={self.singular: obj})",
"def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204",
"def put(self, id):\n payload = marshal(api.payload, room_request)\n taskroom_service.update_room(id, payload)\n return {'Message': \"Room updated successfully\"}"
] |
[
"0.8171113",
"0.8080008",
"0.78269655",
"0.730599",
"0.6754468",
"0.668468",
"0.65975183",
"0.63646346",
"0.6236213",
"0.61737126",
"0.61372775",
"0.60807115",
"0.6018062",
"0.5967394",
"0.5767757",
"0.5758104",
"0.5758104",
"0.57168895",
"0.57055074",
"0.56856376",
"0.56799513",
"0.56116956",
"0.5609301",
"0.5607081",
"0.55954254",
"0.5517713",
"0.5514795",
"0.54940754",
"0.5471222",
"0.5469243"
] |
0.8127168
|
1
|
POST method, doesn't apply to a specific department.
|
def post(id_=None):
    logger.debug('Catch POST request by URL /api/departments/%i.', id_)
    return abort(405)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def post():\n\n logger.debug('Catch POST request by URL /api/departments.')\n args = department_args.parse_args()\n try:\n id_ = ds.add(name=args['name'], email=args['email'])\n created_department = ds.get(id_)\n except IntegrityError:\n return {'message': f\"Department with name {args['name']} already \"\n \"exists.\"}, 404\n except Exception:\n return {'message': \"Can't post department.\"}, 404\n return marshal_departments(created_department), 201",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))",
"def add_department():\n logger.debug('Routed to /departments/add')\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.add(name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t add department with name %s and email \"%s\". '\n 'Exception: %s', name, email, str(exception))\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n return redirect(url_for('department.show_all_departments'))\n\n titles = ['Name', 'E-mail']\n return render_template('add_department.html',\n title='Add department',\n table_title='Adding new department',\n headers=titles)",
"def add_department():\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel(department_name).save()\n return Serializer.serialize(response, 201, \"Department added successfully\")",
"def put():\n\n logger.debug('Catch PUT request by URL /api/departments.')\n return abort(405)",
"def add_department():\n form = AddDepartment()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_department = Department(name=form.name.data)\n db.session.add(new_department)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Department already exists!', 'warning')\n return redirect(url_for('add_department'))\n\n flash(f'Department {form.name.data} created!', 'success')\n return redirect(url_for('home'))\n\n flash('Name not defined.', 'warning')\n return render_template('department/department_add.html', form=form)",
"def post(self, request):\n pass",
"def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200",
"def post(self):\n self.get_or_post(method='POST')",
"def put(self, department_id):\n department = get_department_by_id(department_id)\n department.name = request.json[\"name\"]\n db.session.commit()\n return {}, 200",
"def add_department():\n\tcheck_admin()\n\n\tadd_department = True\n\n\tform = DepartmentForm()\n\tif form.validate_on_submit():\n\t\tdepartment = Department(name=form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add department to the database\n\t\t\tdb.session.add(department)\n\t\t\tdb.session.commit()\n\t\t\tflash(\"You have successsfully added a new department.\")\n\t\texcept:\n\t\t\t#incase the department already exists\n\t\t\tflash(\"Error: department already exists.\")\n\t#once the admin creates a new department,they will be redirected to the departments page\n\treturn render_template('admin/departments/department.html',action=\"Add\", add_department= add_department,form=form,title = \"Add Department\")",
"def http_method_post():\n return 'POST'",
"def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def add_department():\r\n check_admin()\r\n\r\n add_department = True\r\n\r\n form = DepartmentForm()\r\n if form.validate_on_submit():\r\n department = Department(name=form.name.data,\r\n description=form.description.data)\r\n try:\r\n # add department to the database\r\n db.session.add(department)\r\n db.session.commit()\r\n flash('You have successfully added a new department.')\r\n except:\r\n # in case department name already exists\r\n flash('Error: department name already exists.',category='error')\r\n\r\n # redirect to departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n # load department template\r\n return render_template('admin/departments/department.html', action=\"Add\",\r\n add_department=add_department, form=form,\r\n title=\"Add Department\")",
"def _insert_department(self):\n # Insert\n if db_department.idx_department_exists(1) is False:\n record = Department(\n code=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1102)"
] |
[
"0.7745221",
"0.6630201",
"0.6624349",
"0.65502745",
"0.64529896",
"0.64247143",
"0.6355796",
"0.62937057",
"0.62396705",
"0.6131375",
"0.6056458",
"0.6031699",
"0.6026827",
"0.5993256",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5987849",
"0.5975867",
"0.5914173"
] |
0.7382478
|
1
|
Query the plug for energy usage data. Runs as an async task.
|
def getEnergyUsage():
    energy_data = asyncio.run(plug.get_emeter_realtime())
    return energy_data
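
A hedged end-to-end sketch with python-kasa, assuming a module-level plug created from a placeholder address; update() is called once to refresh device state before the energy meter is read:

import asyncio
import kasa

plug = kasa.SmartPlug("192.168.0.50")  # placeholder address; adjust to your device
asyncio.run(plug.update())             # refresh device state before reading the meter

def getEnergyUsage():
    # returns a dict-like object with the plug's realtime meter readings
    return asyncio.run(plug.get_emeter_realtime())

print(getEnergyUsage())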
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def query(self, metric):\n raise NotImplementedError()",
"def consume_energy(self):\n return self.bot_client.send_command(_Command.ConsumeEnergy)",
"def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)",
"def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()",
"async def async_update_data():\n _LOGGER.debug(\"Updating SunPower data\")\n return await hass.async_add_executor_job(sunpower_fetch, sunpower_monitor)",
"async def async_update_data():\n return await hass.async_add_executor_job(read_consumption, api, entry)",
"async def async_update(self):\n today = date.today()\n\n try:\n self.data = await self.hass.async_add_executor_job(\n self.client.get_stats_and_body, today.isoformat()\n )\n except (\n GarminConnectAuthenticationError,\n GarminConnectTooManyRequestsError,\n GarminConnectConnectionError,\n ) as err:\n _LOGGER.error(\n \"Error occurred during Garmin Connect get activity request: %s\", err\n )\n return\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\n \"Unknown error occurred during Garmin Connect get activity request\"\n )\n return",
"async def _async_update_data(self) -> EasyEnergyData:\n today = dt_util.now().date()\n gas_today = None\n energy_tomorrow = None\n\n try:\n energy_today = await self.easyenergy.energy_prices(\n start_date=today, end_date=today\n )\n try:\n gas_today = await self.easyenergy.gas_prices(\n start_date=today, end_date=today\n )\n except EasyEnergyNoDataError:\n LOGGER.debug(\"No data for gas prices for easyEnergy integration\")\n # Energy for tomorrow only after 14:00 UTC\n if dt_util.utcnow().hour >= THRESHOLD_HOUR:\n tomorrow = today + timedelta(days=1)\n try:\n energy_tomorrow = await self.easyenergy.energy_prices(\n start_date=tomorrow, end_date=tomorrow\n )\n except EasyEnergyNoDataError:\n LOGGER.debug(\n \"No electricity data for tomorrow for easyEnergy integration\"\n )\n\n except EasyEnergyConnectionError as err:\n raise UpdateFailed(\"Error communicating with easyEnergy API\") from err\n\n return EasyEnergyData(\n energy_today=energy_today,\n energy_tomorrow=energy_tomorrow,\n gas_today=gas_today,\n )",
"async def get_device_data(self):\n pass",
"async def query(self, metric):\n metric_name = metric.spec.provider.metric\n\n url = self.metrics_provider.spec.influx.url\n token = self.metrics_provider.spec.influx.token\n org = self.metrics_provider.spec.influx.org\n bucket_name = self.metrics_provider.spec.influx.bucket\n\n client = InfluxDBClient(url=url, token=token, org=org)\n query_api = client.query_api()\n\n query = f'''\n from(bucket:\"{bucket_name}\")\n |> range(start: -1h)\n |> filter(fn: (r) => r._measurement == \"{metric_name}\")\n |> last()\n '''\n\n try:\n loop = asyncio.get_event_loop()\n result = await loop.run_in_executor(None, query_api.query, query)\n for table in result:\n for record in table.records:\n response = record.values['_value']\n return float(response)\n\n except Exception as err:\n metric_provider_name = self.metrics_provider.metadata.name\n raise MetricsProviderError(\n f\"Failed to query InfluxDB with provider {metric_provider_name!r}\"\n ) from err\n\n raise MetricError(f\"Metric {metric_name!r} not in InfluxDB response\")",
"async def async_get_temperature(self):\n if self.token is None:\n await self.async_initialize_token()\n\n self.temperature = None\n raw = await self._async_ws_get_function(CMD_TEMPERATURE)\n\n f_to_c = lambda f: (5.0 / 9) * (f - 32)\n try:\n xml_root = element_tree.fromstring(raw)\n self.temperature = Temperature(\n tunerTemperature=f_to_c(int(xml_root.find(\"TunnerTemperature\").text)),\n temperature=f_to_c(int(xml_root.find(\"Temperature\").text)),\n )\n except (element_tree.ParseError, TypeError):\n _LOGGER.warning(\"Can't read temperature from %s\", self.host)\n self.token = None\n raise exceptions.ConnectBoxNoDataAvailable() from None",
"def testEgaugeFetch(self):\n\n timeCol = 'datetime'\n rows = []\n for row in self.aggregator.rawData(dataType = 'egauge',\n orderBy = [timeCol, 'egauge_id'],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')",
"def run(self):\n result = self.Take_Voltage_Measurement()\n self.result_queue.put(result)",
"def run(self):\n result = self.Take_Voltage_Measurement()\n self.result_queue.put(result)",
"def read_db_energies( self ):\n for row in self.db.select():\n db_energy = row.get(\"energy\")\n if ( not db_energy is None ):\n self.db_energies.append(db_energy)",
"def measure():\n print(\"alias, timestamp, current, total, power, voltage, err_code\")\n message_str = MeasurementRequest(None).to_json()\n socket_object = UdpSocket()\n s = UDPSendThread(message_str, socket_object)\n r = UDPRecvThread(socket_object, measurement_output_parser)\n s.start()\n r.start()\n\n wait((s, r))",
"def _run_async_query(self, context):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n self._total_results = len(results)\n self._count_valid = True\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]",
"async def async_query_api(self, endpoint, payload=None):\n async with RetryClient() as client:\n # The Eskom API occasionally drops incoming connections, implement reies\n async with client.get(\n url=self.base_url + endpoint,\n headers=self.headers,\n params=payload,\n ssl=self.ssl_context,\n retry_attempts=50,\n retry_exceptions={\n ClientConnectorError,\n ServerDisconnectedError,\n ConnectionError,\n OSError,\n },\n ) as res:\n return await res.json()",
"async def run():\n while(1):\n # timeout < 1 seems not very stable on my system\n d = await scanner.find_device_by_address(\"C0:98:E5:49:53:54\", timeout=2)\n if not d:\n # Device not found,\n continue\n if ('manufacturer_data' not in d.metadata) or \\\n (d.metadata['manufacturer_data'] is None) or \\\n (736 not in d.metadata['manufacturer_data']):\n print(\"Corrupted data %s ...\" % d.metadata)\n continue\n # This is the mfg data (without the leading 2-byte mfg id)\n # No idea what is 736\n return d.metadata['manufacturer_data'][736]",
"def query(self):\r\n self.reportDrivers()",
"def poll(self) -> None:\n self._resolve_rdates()\n self._resolve_queries()\n self._process_special_cells()\n self._fetch_queries()",
"def plugin_fetch(self, tmrange):\n raise NotImplementedError(\"EMFetcher abstract superclass\")",
"def acquisition(self):\n\t\twhile True:\n\t\t\tself.humidity, self.temperature = Adafruit_DHT.read_retry(SENSOR, PIN)\n\t\t\tprint (\"[{}] New measures from the Adafruit DHT:\\n\\tTemperature: {}C\\n\\tHumidity: {}%\".format(\n\t\t\t\tint(time.time()),\n\t\t\t\tself.temperature,\n\t\t\t\tself.humidity\n\t\t\t))\n\t\t\tmqttCli.publish(\"measure/temperature\", mqttJsonDump(self.temperature))\n\t\t\tmqttCli.publish(\"measure/humidity\", mqttJsonDump(self.humidity))\n\t\t\t\n\t\t\tself.updatePendingJson(\"humidity\", self.humidity, \"data\")\n\t\t\tself.updatePendingJson(\"temperature\", self.temperature, \"data\")\n\t\t\t\n\t\t\tr=req.get('http://localhost:9090/interacquisition')\n\t\t\tr = r.content\n\t\t\tr = json.loads(r)\n\t\t\tdelta_t = r[\"interacquisition\"]*60\n\t\t\t\n\t\t\tprint (\"[{}] Interacquisition time retrieved from the Room Catalog\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\t\n\t\t\ttime.sleep(delta_t)",
"async def query(self, metric):\n metric_name = metric.spec.provider.metric\n\n url = (\n URL(self.metrics_provider.spec.prometheus.url) / \"api/v1/query\"\n ).with_query({\"query\": metric_name})\n\n try:\n # The value of 20 is only here to prevent a long timeout of 5 minutes\n # by default with aiohttp.\n # TODO: The mechanism of fetching metrics will be changed with the issue\n # #326, so this timeout should be modified at this moment. This timeout\n # allows the integration tests to last not too long.\n wait_time = 20\n timeout = ClientTimeout(total=wait_time)\n async with self.session.get(url, timeout=timeout) as resp:\n resp.raise_for_status()\n body = await resp.json()\n except (ClientError, asyncio.TimeoutError) as err:\n metric_provider_name = self.metrics_provider.metadata.name\n raise MetricsProviderError(\n f\"Failed to query Prometheus with provider {metric_provider_name!r}\"\n ) from err\n\n # @see https://prometheus.io/docs/prometheus/latest/querying/api/\n for result in body[\"data\"][\"result\"]:\n if result and result[\"metric\"][\"__name__\"] == metric_name:\n try:\n return float(result[\"value\"][1])\n except (TypeError, ValueError) as err:\n raise MetricError(\n f\"Invalid value for metric {metric_name!r}\"\n ) from err\n\n raise MetricError(f\"Metric {metric_name!r} not in Prometheus response\")",
"async def async_scan_devices(self):\n await self.async_update_info()\n return list(self.last_results.keys())",
"def get_energy(self):\r\n return self._energy",
"def Take_Voltage_Measurement(self):\n self._CHK(nidaq.DAQmxStartTask(self.task_handle))\n samples_read = int32()\n data = numpy.zeros((self.num_samples,),dtype=numpy.float64)\n self._CHK(nidaq.DAQmxReadAnalogF64(self.task_handle,uInt32(self.num_samples),float64(self.timeout),\n DAQmx_Val_GroupByChannel,data.ctypes.data,\n self.num_samples,ctypes.byref(samples_read),None))\n if self.task_handle.value != 0:\n nidaq.DAQmxStopTask(self.task_handle)\n nidaq.DAQmxClearTask(self.task_handle)\n if samples_read.value != self.num_samples:\n return 'error'\n else:\n return data",
"def Take_Voltage_Measurement(self):\n self._CHK(nidaq.DAQmxStartTask(self.task_handle))\n samples_read = int32()\n data = numpy.zeros((self.num_samples,),dtype=numpy.float64)\n self._CHK(nidaq.DAQmxReadAnalogF64(self.task_handle,uInt32(self.num_samples),float64(self.timeout),\n DAQmx_Val_GroupByChannel,data.ctypes.data,\n self.num_samples,ctypes.byref(samples_read),None))\n if self.task_handle.value != 0:\n nidaq.DAQmxStopTask(self.task_handle)\n nidaq.DAQmxClearTask(self.task_handle)\n if samples_read.value != self.num_samples:\n return 'error'\n else:\n return data",
"async def _async_update_data(self) -> list[dict[str, Any]]:\n try:\n return await self.async_get_devices()\n except ClientConnectorError as exc:\n raise UpdateFailed(\"Failed to get LIVISI the devices\") from exc",
"def run(self):\r\n self.collect_data()"
] |
[
"0.6075474",
"0.59772664",
"0.5869885",
"0.58322054",
"0.5771755",
"0.56210566",
"0.5562882",
"0.55395865",
"0.5468506",
"0.54670334",
"0.54600596",
"0.5457776",
"0.54573524",
"0.54573524",
"0.53745216",
"0.53253096",
"0.53008413",
"0.5285705",
"0.5281344",
"0.5260745",
"0.525877",
"0.52222437",
"0.51997465",
"0.51579124",
"0.5137531",
"0.5133022",
"0.51003075",
"0.51003075",
"0.5085909",
"0.50782645"
] |
0.7689566
|
0
|
Connect to the MQTT server and display the server IP when successful. If an error occurs, restart the script.
|
def connectMQTT():
    try:
        mqttClient.connect(mqttServerIP)
        print("Connected to %s MQTT broker" % mqttServerIP)
    except OSError:
        print("Failed to connect to MQTT broker. Restarting and reconnecting.")
        os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
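
As a design note, re-executing the script is one way to recover; paho-mqtt can also handle reconnection itself. A minimal sketch, assuming the same mqttClient and mqttServerIP globals as above:

def on_connect(client, userdata, flags, rc):
    # rc == 0 means the broker accepted the connection
    print("Connected to %s MQTT broker (rc=%d)" % (mqttServerIP, rc))

mqttClient.on_connect = on_connect
mqttClient.reconnect_delay_set(min_delay=1, max_delay=30)
mqttClient.connect_async(mqttServerIP)  # defer the connect to the network loop
mqttClient.loop_start()                 # background thread connects and retries on failure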
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mqtt_connect():\n global mqtt_client\n logging.debug('connecting to mqtt broker %s', config['mqtt']['host'])\n mqtt_client = paho.Client()\n mqtt_client.tls_set()\n mqtt_client.on_connect = mqtt_on_connect\n mqtt_client.on_message = mqtt_on_message\n mqtt_client.username_pw_set(config['mqtt']['username'], config['mqtt']['password'])\n mqtt_client.connect(config['mqtt']['host'], config['mqtt']['port'])\n mqtt_client.loop_start()",
"def connect(self):\n self.log.info(u\"==> Connecting to MQTT broquer ...\")\n try:\n self.MQTTClient.connect(self.mqtthost, int(self.mqttport), 60)\n self.log.info(u\"==> Connected on MQTT broquer\")\n self.MQTTClient.loop_start() # This will automatically reconnect if connection is lost.\n except:\n error = u\"### Error while connecting to MQTT broquer : %s \" % str(traceback.format_exc())\n raise MQTTException(error)",
"def connect():\n logging.info('Client connected')",
"def clientConnect(self,server_ip=\"localhost\"):\n try:\n server_socket = int(12345)\n m = TaskManager(address=(server_ip, server_socket), authkey = b'secret')\n m.connect()\n return m\n except:\n from gui import qt_gui\n qt_gui.set_status_text(\"Connection Failed!\")\n return None",
"def on_connect(mqtt_client, obj, flags, rc):\n print(\"Connected\")",
"def on_connect(client, userdata, flags, rc):\n if rc == 0:\n logging.info(\"Connected to %s:%s\" % (MQTT_HOST_IP, MQTT_PORT))\n mqttc.subscribe(MQTT_TOPIC_IN, qos=MQTT_QOS) \n elif rc == 1:\n logging.info(\"Connection refused - unacceptable protocol version\")\n elif rc == 2:\n logging.info(\"Connection refused - identifier rejected\")\n elif rc == 3:\n logging.info(\"Connection refused - server unavailable\")\n elif rc == 4:\n logging.info(\"Connection refused - bad user name or password\")\n elif rc == 5:\n logging.info(\"Connection refused - not authorised\")\n else:\n logging.warning(\"Connection failed - result code %d\" % (rc))",
"def start(self):\n if not self._connected:\n self._client.connect(self._addr, port=self._port, keepalive=60, bind_address=\"\")\n self._client.loop_start()\n self._connected = True\n logger.info(\"Connection with MQTT Broker at %s:%d estabilished.\", self._addr, self._port)",
"def connect_mqtt():\n ### TODO: Connect to the MQTT client ###\n client = mqtt.Client(\"41\")\n client.connect(MQTT_HOST,port=MQTT_PORT, keepalive=MQTT_KEEPALIVE_INTERVAL)\n\n return client",
"def connect_mqtt():\n client = mqtt.Client()\n client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n return client",
"def on_connection_start(self) -> None:\r\n print(\r\n \"Connected with: {}:{}\\n\".format(\r\n self.connection_info[\"host\"], self.connection_info[\"port\"]\r\n )\r\n )",
"def on_connect(client, userdata, flags, rc):\n print(\"Connected with with mqtt server: \" + str(rc))\n client.subscribe(\"clients/#\")",
"def on_connect(mqttc, mosq, obj, rc):\n print(\"Connected with result code:\"+str(rc))\n # subscribe for all devices of user\n mqttc.subscribe('+/devices/+/up')\n mqttc.subscribe('+/devices/+/events/down/sent')\n if rc != 0:\n sys.exit('Could not connect to server. \\n Result code: ' + str(rc))",
"def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC)",
"def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC)",
"def connect(self):\r\n try:\r\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(self.ip))\r\n print (\"connected!\")\r\n except Exception as error:\r\n print (\"connect() - error - {}\".format(error))",
"async def connect(self) -> None:\n self.client = mqtt.Client()\n self.client.on_message = self.on_message\n self.client.connect(self.host, self.port)\n self.client.loop_start()\n self.client.subscribe(LSST_GENERAL_TOPIC)\n self.connected = True\n self.log.debug(\"Connected.\")",
"def connect_to_server(self):\n\n\t\tself.__logs.append('-- connecting to server ...')\n\t\tself.connect_to_ssh_server.emit(self.__ip.text(), int(self.__port.text()),\\\n\t\t\tself.__username.text(), self.__pwd.text())",
"def on_connect_btn(self):\n ip = self.ip_edit.text()\n port = int(self.port_edit.text())\n try:\n self.rtsp_socket.connect((ip, port))\n QMessageBox.information(self, 'success', 'Connection Success\\n' + str(self.ip_edit.text()) + '\\n' +\n str(self.port_edit.text()), QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n except:\n QMessageBox.critical(self, \"Connection Error\", \"Failed to connect \\n\" + str(self.ip_edit.text()) + '\\n' +\n str(self.port_edit.text()), QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)",
"def main():\n #define the callbacks\n mqttc.on_message = on_message\n mqttc.on_connect = on_connect\n mqttc.on_publish = on_publish\n mqttc.on_subscribe = on_subscribe\n \n mqttc.will_set(willtopic, payload=\"offline\", qos=0, retain=True)\n mqttc.reconnect_delay_set(delay=3, delay_max=30, exponential_backoff=True)\n \n try:\n mqttc.connect(\"mqtt.localdomain\", 1883, 60)\n except Exception, e:\n print(\"XBMC MQTT -- MQTT connection failed: %s\" % (str(e)))\n sys.exit(1)\n \n while True:\n try:\n mqttc.loop_forever()\n except socket.error:\n print(\"XBMC MQTT --MQTT server disconnected; sleeping\")\n time.sleep(5)\n xbmc.executebuiltin('Notification(Error, mqtt disconnected pls chk,5000,'+mqtt_logo+')\\'') \n except:\n raise",
"def connectToServer(self):\n self.client = Client(base_url = self.server)\n self.ping()",
"def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC, qos=1)",
"def connect(self):\n self.class_logger.info(\"Performing connection to TRex server via HLT API\")\n self.check_res(self.hltapi.connect(device=self.host, port_list=self.ports, reset=True, break_locks=True))",
"def on_connect(unused_client, unused_userdata, unused_flags, rc):\n #print('on_connect: ', mqtt.connack_string(rc))\n print(f\"on_connect: {error_str(rc)} ({mqtt.connack_string(rc)})\")\n print()\n\n global connected\n connected = True",
"def on_connect(self):\n print('Client connected!')",
"def mqtt_on_connect(client, userdata, flags, rc):\n logging.debug('successfully connected to mqtt broker')\n client.subscribe(config['mqtt']['subscribe_topic'])",
"def connect():",
"def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()",
"def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to broker\")\n client.connected_flag = True\n else:\n print(\"Connection failed\")\n client.connected_flag = False",
"def on_connect(self, userdata, flags, rc):\n logging.info(f'Connected with Mosquitto Server: (code) {rc}')",
"def _connect(self):\n try:\n self._si = SmartConnectNoSSL(host=self._host, user=self._username, pwd=self._password)\n except Exception as e:\n self._logger.error(\"Unable to connect to host {0} : {1}\".format(self._host, e))\n self._si = None"
] |
[
"0.68676937",
"0.67100126",
"0.669355",
"0.66683996",
"0.6472447",
"0.64513195",
"0.63695097",
"0.63592774",
"0.62721497",
"0.62624174",
"0.62452495",
"0.6171494",
"0.6170111",
"0.6170111",
"0.61474234",
"0.6132856",
"0.6131563",
"0.60914075",
"0.60817116",
"0.6070144",
"0.6020819",
"0.6007748",
"0.6003138",
"0.5988177",
"0.59660935",
"0.5957903",
"0.59576046",
"0.5951979",
"0.5931256",
"0.59248734"
] |
0.7611444
|
0
|
Initialize MQTT client and smart plug device instance
|
def initialize():
    client = mqtt.Client(client_id=clientID)
    p = kasa.SmartPlug(plugIP)
    asyncio.run(p.update())
    return client, p
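
A hedged sketch of how these snippets could be wired together into a periodic publishing loop, assuming the clientID, plugIP and mqttServerIP globals above and a hypothetical topic name:

import json
import time

def main():
    client, plug = initialize()   # assumes these are also the module-level globals used above
    client.connect(mqttServerIP)
    client.loop_start()
    while True:
        reading = getEnergyUsage()
        client.publish("home/plug/energy", json.dumps(dict(reading)))  # hypothetical topic
        time.sleep(60)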
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self):\n self.host = None\n self.port = None\n self.topic = None\n self._is_opened = False\n self.debug = 0\n self.qos = 0\n self.mqttc = mqtt.Client(\"sng_mqtt\")",
"def init_mqtt_client(self):\n self.mqtt_client = Client() # create client object\n self.mqtt_client.username_pw_set(self.mqtt_user, self.mqtt_password)\n print(\"Connecting to the MQTT broker\", self.host_name, \".\")\n self.mqtt_client.connect(self.host_name, self.host_port)\n\n def on_message(client, userdata, msg):\n \"\"\" callback function to process mqtt messages \"\"\"\n message_type = msg.topic.split(\"/\")[-1]\n message = str(msg.payload.decode(\"utf-8\"))\n print(\"\\nreceived message on topic \" + msg.topic + \": \" + message)\n\n # The message should contain 3 things:\n # either <field/config>, parameter_name, new_value\n # or <field/config>, parameter_name, client_id\n if len(message.split(\",\")) != 3:\n print(\"Bad message structure\")\n return 0\n\n # React to custom topics. Should be implemented in a concrete class depending on the behaviour to simulate.\n self.custom_mqtt_reaction(msg.topic, message)\n\n # The client wants to change the value of a parameter\n if message_type == \"change\":\n request_type, parameter_name, new_value = message.split(\",\")\n if request_type == \"config\" and parameter_name in self.get_parameters_list():\n self.set_parameter_value(parameter_name, new_value)\n elif request_type == \"field\" and parameter_name in self.get_fields_list():\n self.set_field_value(parameter_name, new_value)\n\n # The client requests the value of a parameter\n elif message_type == \"request\":\n request_type, parameter_name, client_id = message.split(\",\")\n\n # Fake latency\n sleep(float(self.get_parameter_value(\"response_latency\")) / 1000)\n\n # ask for a configuration parameter\n if request_type == \"config\":\n print(\"request for a configuration parameter\")\n if parameter_name in self.get_parameters_list():\n self.mqtt_client.publish(self.base_topic + \"/answer/\" + client_id,\n self.get_parameter_value(parameter_name))\n else:\n self.mqtt_client.publish(self.base_topic + \"/answer/\" + client_id,\n \"no such parameter\")\n\n # ask for a field\n elif request_type == \"field\":\n print(\"request for a field\")\n if parameter_name in self.get_fields_list():\n client.publish(self.base_topic + \"/answer/\" + client_id,\n self.get_field_value(parameter_name))\n else:\n self.mqtt_client.publish(self.base_topic + \"/answer/\" + client_id,\n \"no such field\")\n\n self.mqtt_client.on_message = on_message # bind function to callback\n\n building, floor, room, type, name = self.base_parameters.values()\n\n topics = [\n building + \"/\" + floor + \"/\" + room + \"/\" + type + \"/\" + name + \"/+\",\n building + \"/\" + floor + \"/\" + room + \"/\" + type + \"/All/+\",\n building + \"/\" + floor + \"/\" + room + \"/All/All/+\",\n building + \"/\" + floor + \"/All/\" + type + \"/All/+\",\n building + \"/\" + floor + \"/All/All/All/+\",\n building + \"/All/All/\" + type + \"/All/+\",\n building + \"/All/All/All/All/+\",\n \"All/All/All/\" + type + \"/All/+\",\n \"All/All/All/All/All/+\"\n ]\n for topic in topics:\n print(\"Subscribing to the topic \" + topic)\n self.mqtt_client.subscribe(topic)\n\n self.mqtt_client.loop_start() # start loop to process received messages",
"def mqttConnect(self):\n clientId = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(self.projectId, \n self.cloudRegion, \n self.registryId, \n self.deviceId)\n mqttc = mqtt.Client(client_id = clientId)\n \n # With Google Cloud IoT Core, the username field is ignored, and the\n # password field is used to transmit a JWT to authorize the device.\n mqttc.username_pw_set(\n username='unused',\n password=self.create_jwt())\n\n # Enable SSL/TLS support.\n mqttc.tls_set(ca_certs=self.caCert, tls_version=ssl.PROTOCOL_TLSv1_2) \n self.blogger.info('Starting connection to: {0}:{1}'.format(self.mqttHost, self.mqttPort))\n mqttc.on_connect = self.connectCallBack\n mqttc.on_message = self.processMessage\n mqttc.on_publish = self.publishedMessageCallBack\n mqttc.connect(self.mqttHost, port=self.mqttPort, keepalive=60)\n try:\n mqttc.subscribe(self.configTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to config topic: {}'.format(self.configTopic))\n mqttc.subscribe(self.commandTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to command topic: {}'.format(self.commandTopic))\n mqttc.subscribe(self.eventTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to event topic: {}'.format(self.eventTopic))\n self.messageToPublish = '{\"thingy\":\"ready\"}'\n# self.publishMessage(self.eventTopic, QoS)\n except Exception as e:\n self.blogger.error('subscription failed for reason: {0}'.format(e))\n\n return mqttc",
"def __init__(self, config: ConfigType) -> None:\n\n # We don't import on the top because some integrations\n # should be able to optionally rely on MQTT.\n import paho.mqtt.client as mqtt # pylint: disable=import-outside-toplevel\n\n if (protocol := config.get(CONF_PROTOCOL, DEFAULT_PROTOCOL)) == PROTOCOL_31:\n proto = mqtt.MQTTv31\n elif protocol == PROTOCOL_5:\n proto = mqtt.MQTTv5\n else:\n proto = mqtt.MQTTv311\n\n if (client_id := config.get(CONF_CLIENT_ID)) is None:\n # PAHO MQTT relies on the MQTT server to generate random client IDs.\n # However, that feature is not mandatory so we generate our own.\n client_id = mqtt.base62(uuid.uuid4().int, padding=22)\n transport = config.get(CONF_TRANSPORT, DEFAULT_TRANSPORT)\n self._client = mqtt.Client(client_id, protocol=proto, transport=transport)\n\n # Enable logging\n self._client.enable_logger()\n\n username: str | None = config.get(CONF_USERNAME)\n password: str | None = config.get(CONF_PASSWORD)\n if username is not None:\n self._client.username_pw_set(username, password)\n\n if (\n certificate := get_file_path(CONF_CERTIFICATE, config.get(CONF_CERTIFICATE))\n ) == \"auto\":\n certificate = certifi.where()\n\n client_key = get_file_path(CONF_CLIENT_KEY, config.get(CONF_CLIENT_KEY))\n client_cert = get_file_path(CONF_CLIENT_CERT, config.get(CONF_CLIENT_CERT))\n tls_insecure = config.get(CONF_TLS_INSECURE)\n if transport == TRANSPORT_WEBSOCKETS:\n ws_path: str = config.get(CONF_WS_PATH, DEFAULT_WS_PATH)\n ws_headers: dict[str, str] = config.get(CONF_WS_HEADERS, DEFAULT_WS_HEADERS)\n self._client.ws_set_options(ws_path, ws_headers)\n if certificate is not None:\n self._client.tls_set(\n certificate,\n certfile=client_cert,\n keyfile=client_key,\n tls_version=ssl.PROTOCOL_TLS_CLIENT,\n )\n\n if tls_insecure is not None:\n self._client.tls_insecure_set(tls_insecure)",
"def start_mqtt(self):\n def on_connect(client, userdata, flags, rc):\n logger.debug('MQTT client connected with result code \"%s\"' % rc)\n self.connected = True\n\n for topic in self.subscriptions:\n logger.debug('Subscribing to MQTT topic \"%s\"' % topic)\n client.subscribe(topic)\n\n def on_disconnect(client, userdata, rc):\n logger.debug('MQTT client disconnected with result code \"%s\"' % rc)\n self.connected = False\n\n def on_message(client, userdata, message):\n payload = str(message.payload, 'utf8')\n logger.debug('Received %s byte MQTT message at topic \"%s\"' % (len(payload), message.topic))\n\n data = None\n\n if payload:\n try:\n data = json.loads(payload)\n except json.decoder.JSONDecodeError as e:\n logger.error('Error while JSON decoding message payload: %s' % e)\n\n if data and data.get('rid', None):\n rid = data['rid']\n\n if rid in self.response_callbacks:\n result = self.response_callbacks[rid](payload, data)\n\n if result is not False:\n self.response_callbacks.pop(rid, None)\n\n def on_publish(client, userdata, mid):\n logger.debug('Published message \"%s\" over MQTT' % mid)\n\n # Since the message ID is only generated when publishing,\n # we have to publish BEFORE registering any callbacks.\n # To prevent issues, we wait until these callbacks have been\n # registered before continueing\n while mid not in self.publishes:\n self.wait()\n\n self.publishes.remove(mid)\n\n if mid in self.publish_callbacks:\n self.publish_callbacks[mid]()\n self.publish_callbacks.pop(mid, None)\n\n self.mqtt = mqtt.Client()\n self.mqtt.on_connect = on_connect\n self.mqtt.on_disconnect = on_disconnect\n self.mqtt.on_message = on_message\n self.mqtt.on_publish = on_publish\n\n if self.options.get('secure', False):\n logger.debug('Enabling TLS')\n self.mqtt.tls_set('/etc/ssl/certs/ca-certificates.crt', cert_reqs=ssl.CERT_NONE)\n self.mqtt.tls_insecure_set(True)\n\n if self.options.get('username', None):\n logger.debug('Using username \"%s\" for MQTT %s a password',\n self.options['username'], 'WITH' if self.options.get('password', None) else 'WITHOUT')\n self.mqtt.username_pw_set(self.options['username'], password=self.options.get('password', None))\n\n try:\n logger.debug('Connecting to MQTT server at \"%s:%s\"' % (self.options['host'], self.options['port']))\n self.mqtt.connect(self.options['host'], self.options['port'], self.options['keepalive'])\n self.mqtt.loop_start()\n except Exception as e:\n logger.error('Error while connecting to MQTT server: %s' % e)\n exit(1)\n\n while not self.connected:\n self.wait()",
"def mqtt_connect():\n global mqtt_client\n logging.debug('connecting to mqtt broker %s', config['mqtt']['host'])\n mqtt_client = paho.Client()\n mqtt_client.tls_set()\n mqtt_client.on_connect = mqtt_on_connect\n mqtt_client.on_message = mqtt_on_message\n mqtt_client.username_pw_set(config['mqtt']['username'], config['mqtt']['password'])\n mqtt_client.connect(config['mqtt']['host'], config['mqtt']['port'])\n mqtt_client.loop_start()",
"def init(\n self,\n ) -> bool:\n success = True\n try:\n self.client = mqtt.Client(client_id=\"Draco\", protocol=mqtt.MQTTv5)\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.client.connect(\n host=self._config[\"broker_ip\"], port=self._config[\"broker_port\"]\n )\n self.client.loop_start()\n\n except Exception as error:\n print(f\"Process {self._pid} - \" + repr(error))\n success = False\n return success",
"def start(\n self,\n mqtt_data: MqttData,\n ) -> None:\n self._mqtt_data = mqtt_data\n self.init_client()",
"def connect_mqtt():\n ### TODO: Connect to the MQTT client ###\n client = mqtt.Client(\"41\")\n client.connect(MQTT_HOST,port=MQTT_PORT, keepalive=MQTT_KEEPALIVE_INTERVAL)\n\n return client",
"def get_mqtt_client():\n client = mqtt.Client()\n client.connected_flag = False # set flag\n client.on_connect = on_connect\n client.on_publish = on_publish\n return client",
"def run(self) -> None:\n\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.DEBUG, f'MQTT: client name: {self.clientID}')\n\t\tself.mqttClient = mqtt.Client(client_id=self.clientID, clean_session=False if self.clientID else True)\t# clean_session=False is defined by TS-0010\n\n\t\t# Enable SSL\n\t\tif self.useTLS:\n\t\t\tself.mqttClient.tls_set(ca_certs=self.caFile, cert_reqs=ssl.CERT_REQUIRED if self.verifyCertificate else ssl.CERT_NONE)\n\n\t\t# Set username/password\n\t\tif self.username and self.password:\n\t\t\tself.mqttClient.username_pw_set(self.username, self.password)\n\t\t\n\t\tself.mqttClient.on_connect \t\t= self._onConnect\n\t\tself.mqttClient.on_disconnect\t= self._onDisconnect\n\t\tself.mqttClient.on_log\t\t\t= self._onLog\n\t\tself.mqttClient.on_subscribe\t= self._onSubscribe\n\t\tself.mqttClient.on_unsubscribe\t= self._onUnsubscribe\n\t\tself.mqttClient.on_message\t\t= self._onMessage\n\n\t\ttry:\n\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.DEBUG, f'MQTT: connecting to host:{self.address}, port:{self.port}, keepalive: {self.keepalive}, bind: {self.bindIF}')\n\t\t\tself.mqttClient.connect(host=self.address, port=self.port, keepalive=self.keepalive, bind_address=self.bindIF)\n\t\texcept Exception as e:\n\t\t\tif self.messageHandler:\n\t\t\t\tself.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot connect to broker: {e}')\n\t\t\t\tself.messageHandler.onError(self, -1)\n\n\t\t# Actually start the actor to run the MQTT client as a thread\n\t\tself.actor = BackgroundWorkerPool.newActor(self._mqttActor, name='MQTTClient').start()",
"def __init__(self, name, on_message, on_publish, will_message=\"Logging off\"):\n self.client = mqtt.Client(client_id=name, clean_session=True, userdata=None, transport=\"tcp\")\n self.client.username_pw_set(MqttClient.username, MqttClient.password)\n self.client.on_connect = MqttClient.on_connect\n self.client.on_message = on_message\n self.client.on_publish = on_publish\n\n # In production, let's consider disabling logging or routing to a file\n self.client.on_log = MqttClient.on_log\n self.client.enable_logger()\n\n # This ensures, that there is some sort of goodbye on losing connection\n self.client.will_set(name, will_message)\n\n # Connect immediately\n self.client.connect(MqttClient.broker_address, port=MqttClient.broker_port)",
"def connect_mqtt():\n client = mqtt.Client()\n client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n return client",
"def __init__(self, client_id, host, port, topic, func_on_message):\n self.host = host\n self.port = port\n self.topic = topic\n self.client = mqtt.Client(client_id=client_id)\n self.client.on_connect = self.on_connect\n self.client.on_message = func_on_message",
"def __init__(self, hostname: str, port: int):\n # Create a dictionary of topics and callbacks\n self.callback_dict = dict()\n\n self.client = mqtt.Client(userdata=self.callback_dict)\n self.client.on_message = _on_message_handler\n self.client.connect(hostname, port, 60)",
"def start(self):\n l.debug(\"Initializing the MQTT connection...\")\n self._mqtt_client.connect(self.domain, self.port, keepalive=30)\n\n # Starts a new thread that handles mqtt protocol and calls us back via callbacks\n l.debug(\"(Re)Starting the MQTT loop.\")\n self._mqtt_client.loop_stop(True)\n self._mqtt_client.loop_start()\n self.connect_event.wait()\n\n # Subscribe to the corresponding topics ...\n self.device_topic = build_device_request_topic(self.target_device_uuid)\n self.client_response_topic = build_client_response_topic(self.user_id, self._app_id)\n self.user_topic = build_client_user_topic(self.user_id)\n\n l.info(f\"Subscribing to topic: {self.device_topic}\")\n self._mqtt_client.subscribe(self.device_topic)\n self.subscribe_event.wait()\n self.subscribe_event.clear()\n\n l.info(f\"Subscribing to topic: {self.client_response_topic}\")\n self._mqtt_client.subscribe(self.client_response_topic)\n self.subscribe_event.wait()\n self.subscribe_event.clear()\n\n l.info(f\"Subscribing to topic: {self.user_topic}\")\n self._mqtt_client.subscribe(self.user_topic)\n self.subscribe_event.wait()\n self.subscribe_event.clear()",
"def __init__(self, mqtt_state_based_provider):\n super(SymmetricKeyProvisioningDeviceClient, self).__init__(mqtt_state_based_provider)\n self._polling_machine = PollingMachine(mqtt_state_based_provider)",
"def setup_mqtt_client(mqtt_conf, mqtt_client):\n\n if mqtt_conf['TLS']['enable']:\n logger.info(\"TLS Setup for Broker\")\n logger.info(\"checking TLS_Version\")\n tls = mqtt_conf['TLS']['tls_version']\n if tls == 'tlsv1.2':\n tlsVersion = ssl.PROTOCOL_TLSv1_2\n elif tls == \"tlsv1.1\":\n tlsVersion = ssl.PROTOCOL_TLSv1_1\n elif tls == \"tlsv1\":\n tlsVersion = ssl.PROTOCOL_TLSv1\n else:\n logger.info(\"Unknown TLS version - ignoring\")\n tlsVersion = None\n if not mqtt_conf['TLS']['insecure']:\n\n logger.info(\"Searching for Certificates in certdir\")\n CERTS_DIR = mqtt_conf['TLS']['certs']['certdir']\n if os.path.isdir(CERTS_DIR):\n logger.info(\"certdir exists\")\n CA_CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['cafile'])\n CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['certfile'])\n KEY_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['keyfile'])\n\n mqtt_client.tls_set(ca_certs=CA_CERT_FILE, certfile=CERT_FILE, keyfile=KEY_FILE, cert_reqs=ssl.CERT_REQUIRED, tls_version=tlsVersion)\n else:\n logger.error(\"certdir does not exist.. check path\")\n sys.exit()\n else:\n mqtt_client.tls_set(ca_certs=None, certfile=None, keyfile=None, cert_reqs=ssl.CERT_NONE, tls_version=tlsVersion)\n mqtt_client.tls_insecure_set(True)\n \n if mqtt_conf['username'] and mqtt_conf['password']:\n logger.info(\"setting username and password for Broker\")\n mqtt_client.username_pw_set(mqtt_conf['username'], mqtt_conf['password'])\n \n return mqtt_client",
"def setup_mqtt(mqtt_broker: str, mqtt_port: int = 1883, keep_alive: int = 60) -> mqtt.Client:\n client = mqtt.Client()\n client.connect(mqtt_broker, mqtt_port, keep_alive)\n return client",
"async def connect(self) -> None:\n self.client = mqtt.Client()\n self.client.on_message = self.on_message\n self.client.connect(self.host, self.port)\n self.client.loop_start()\n self.client.subscribe(LSST_GENERAL_TOPIC)\n self.connected = True\n self.log.debug(\"Connected.\")",
"def connectMQTT():\n try:\n mqttClient.connect(mqttServerIP)\n print(\"Connected to %s MQTT broker\" % mqttServerIP)\n except OSError:\n print(\"Failed to connect to MQTT broker. Restarting and reconnecting.\")\n os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)",
"def client_setup(self):\n self.client = Client()",
"def connect(self):\n self.log.info(u\"==> Connecting to MQTT broquer ...\")\n try:\n self.MQTTClient.connect(self.mqtthost, int(self.mqttport), 60)\n self.log.info(u\"==> Connected on MQTT broquer\")\n self.MQTTClient.loop_start() # This will automatically reconnect if connection is lost.\n except:\n error = u\"### Error while connecting to MQTT broquer : %s \" % str(traceback.format_exc())\n raise MQTTException(error)",
"def setup(self, dictParams):\n global connack\n # we dont use globals here now - instead use config.py which is imported where needed to \n # give cross-module scope\n #global globalScope.incomingMessageBuffer\n global broker_public_key\n \n hostname = dictParams[\"sess\"]\n name = dictParams[\"dest\"]\n port = dictParams[\"port\"]\n protocol = dictParams[\"protoVer\"]\n broker = dictParams[\"broker\"]\n # this is a Singleton\n self.receiver = dictParams[\"RcvdMsg\"]\n timeout = dictParams[\"timeout\"]\n oldtimestamp = dictParams[\"oldtimestamp\"]\n anotherParam = dictParams[\"anotherParam\"]\n \n self.brokerName = broker\n\n # Setting clean_session = False means that subsciption information and \n # queued messages are retained after the client disconnects. It is suitable\n # in an environment where disconnects are frequent.\n # client_id is any unique identifier so our own fqdn should be alright to use\n mqtt_client = mqtt.Client(protocol=self.protocol, client_id=self.myName, clean_session=False)\n mqtt_client.on_connect = self.on_connect\n mqtt_client.on_message = self.on_message\n mqtt_client.on_publish = self.on_publish\n mqtt_client.on_disconnect = self.on_disconnect\n mqtt_client.on_subscribe = self.on_subscribe\n mqtt_client.on_unsubscribe = self.on_unsubscribe\n mqtt_client.on_log = self.on_log\n \n \n # Set the LWT\n # If the client disconnects without calling disconnect, the broker will\n # publish this message on its behalf\n # retain should be set to true\n mqtt_client.will_set(self.STATUS, \n self.status_message(STATUS_DISCONNECTED_UNGRACE), \n qos=QosType.FLAG_QOS_ATMOSTONCE.value, retain=True) \n\n # Connect to the broker\n # keepalive is maximum number of seconds allowed between communications\n # with the broker. If no other messages are sent, the client will send a\n # ping request at this interval\n # set it high for devices that are disconnected for small periods of time, also for debugging\n # set it extremely high for devices that are out to sea for days\n keepalive=1800\n try:\n logging.info('Attempting to connect to broker at ' + self.brokerName)\n mqtt_client.connect(self.brokerName, self.port, keepalive)\n except:\n logging.error(\"ERROR - could not connect to broker at \" + self.brokerName)\n return False\n else:\n logging.info(\"INFO - all OK - no problem on attempt to connect to broker at \" + self.brokerName) \n \n \n # Force function to block until connack is sent from the broker, or timeout\n connack = False\n start_time = time.time()\n while not connack:\n time.sleep(0.1)\n mqtt_client.loop()\n \n if time.time() - start_time > float(timeout):\n raise MqttTimeOutError(\"The program timed out while trying to connect to the broker!\")\n break\n \n # When connected, subscribe to the relevant channels\n mqtt_client.subscribe([(self.PUBLIC, 1), (self.PROTECTED, 1),\n (self.PRIVATE, 1), (self.PINGREQ, 1),\n (self.PINGACK, 1), (self.HANDSHAKE, 1)\n ])\n \n self.client = mqtt_client\n \n # init the globalScope.incomingMessageBuffer - this is now done in the config.py\n \n \n # Do a blocking call\n broker_public_key = None\n self.client.publish(self.STATUS, \n self.status_message(STATUS_CONNECTED), \n qos=QosType.FLAG_QOS_ATLEASTONCE.value)\n\n # TODO dont know what this does and it was getting stuck in the loop so i got rid of it \n# while self.broker_public_key == None:\n# time.sleep(0.1)\n# mqtt_client.loop()\n# # Check the message buffer\n# if globalScope.incomingMessageBuffer != []:\n# for message in globalScope.incomingMessageBuffer:\n# if message.topic == 
self.HANDSHAKE:\n# # Check whether it is a broker key message.\n# try:\n# payload = json.loads(message.payload.decode())\n## disabled for now\n## self.broker_public_key = payload['public_key']\n## print(self.broker_public_key)\n# self.broker_public_key = json.loads(message.payload.decode())\n# except:\n# pass\n# globalScope.incomingMessageBuffer = []\n \n\n # Start the loop. This method is preferable to repeatedly calling loop\n # since it handles reconnections automatically. It is non-blocking and \n # handles interactions with the broker in the background.\n logging.debug('DEBUG - Starting loop')\n try:\n# mqtt_client.loop()\n self.client.loop_start()\n except:\n logging.error(\"ERROR - failure of loop_start\")\n \n return True",
"def __init__(self, addr, port, auth_info=None, base_topic=\"\"):\n self._addr = addr\n self._port = port\n self._connected = False\n self._client = mqtt.Client(client_id=\"\", clean_session=True, userdata=None, protocol=mqtt.MQTTv311)\n self._client.on_message = self.on_messagge\n self._callbacks = {}\n if auth_info is not None:\n self._client.username_pw_set(auth_info[\"user\"], auth_info[\"password\"])\n self._base_topic = base_topic",
"def __init__(\n self,\n host: str,\n port: int,\n client_id: str,\n qos: int,\n lastwill_message: Message,\n topics: list,\n ) -> None:\n self.log = logger_factory.get_logger(str(self.__class__.__name__))\n\n self.host = host\n self.port = port\n self.client_id = client_id\n self.topics = topics\n self.qos = qos\n self.lastwill_message = lastwill_message\n self.inbound_message_listener: Callable[\n [Message], None\n ] = lambda message: print(\"\\n\\nNo inbound message listener set!\\n\\n\")\n self._connected = False\n self.connected_rc: Optional[int] = None\n\n self.client = mqtt.Client(client_id=self.client_id)\n self.client.on_connect = self._on_mqtt_connect\n self.client.on_disconnect = self._on_mqtt_disconnect\n self.client.on_message = self._on_mqtt_message\n self.client.username_pw_set(self.client_id)\n self.client.will_set(\n self.lastwill_message.topic, self.lastwill_message.payload\n )\n\n self.mutex = Lock()\n\n self.log.debug(self.__repr__())",
"def setup_connection(args):\n\n event_loop_group = io.EventLoopGroup(1)\n host_resolver = io.DefaultHostResolver(event_loop_group)\n client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)\n\n if args.use_websocket is True:\n proxy_options = None\n if args.proxy_host:\n proxy_options = http.HttpProxyOptions(\n host_name=args.proxy_host,\n port=args.proxy_port,\n )\n\n credentials_provider = auth.AwsCredentialsProvider.new_default_chain(\n client_bootstrap,\n )\n mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(\n endpoint=args.endpoint,\n client_bootstrap=client_bootstrap,\n region=args.signing_region,\n credentials_provider=credentials_provider,\n websocket_proxy_options=proxy_options,\n ca_filepath=args.root_ca,\n on_connection_interrupted=on_connection_interrupted,\n on_connection_resumed=on_connection_resumed,\n client_id=args.client_id,\n clean_session=False,\n keep_alive_secs=6,\n )\n\n else:\n mqtt_connection = mqtt_connection_builder.mtls_from_path(\n endpoint=args.endpoint,\n cert_filepath=args.cert,\n pri_key_filepath=args.key,\n client_bootstrap=client_bootstrap,\n ca_filepath=args.root_ca,\n on_connection_interrupted=on_connection_interrupted,\n on_connection_resumed=on_connection_resumed,\n client_id=args.client_id,\n clean_session=False,\n keep_alive_secs=6,\n )\n\n print(\n f\"Connecting to {args.endpoint} with client ID '{args.client_id}'...\",\n )\n\n return mqtt_connection",
"def init(self):\n # Initialize runtime and MDK:\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"somevalue\")\n self.runtime.dependencies.registerService(\"failurepolicy_factory\",\n RecordingFailurePolicyFactory())\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()\n self.disco = self.mdk._disco\n # Create a session:\n self.session = self.mdk.session()",
"def connect(self, mqtt_client, discovery_prefix=\"homeassistant\", node_id=None):\n self.discovery_prefix = discovery_prefix\n self.node_id = node_id\n self.client = mqtt_client\n if self.device_type == 'switch':\n self.client.message_callback_add(self.command_topic, self._on_command)\n self.client.subscribe(self.command_topic)\n\n self.client.publish(self.config_topic, json.dumps(self.config), retain=self.retain)\n logger.debug(\"Connected to broker, sent config to {}\".format(self.config_topic))",
"def init_client():\n init_config()\n begin_sending_packets()"
] |
[
"0.774876",
"0.72923833",
"0.7255548",
"0.7248288",
"0.7223283",
"0.71822965",
"0.7165273",
"0.705659",
"0.70517665",
"0.70345664",
"0.69778866",
"0.6961443",
"0.6934719",
"0.69093215",
"0.68574363",
"0.6839927",
"0.66756827",
"0.66753244",
"0.66513395",
"0.6618242",
"0.65082496",
"0.6483922",
"0.64481115",
"0.64218414",
"0.63943094",
"0.63100874",
"0.6307671",
"0.6297023",
"0.629695",
"0.6285999"
] |
0.8072966
|
0
|
Main script cycle (check connection, get data, send data). Every 5 seconds it tries to send energy consumption data and the actual power state to the MQTT server if the connection is up.
|
def publish():
    while True:
        # Make sure the MQTT connection is up before publishing.
        mqttClient.reconnect()
        # Read the current counters from the energy meter.
        energy_data = getEnergyUsage()
        watts = float(energy_data['power_mw']) / 1000
        watt_hours = float(energy_data['total_wh'])
        # Publish instantaneous power and cumulative energy, then wait for the next cycle.
        sentPayload(name="power", site="bathroom", value=watts)
        sentPayload(name="energy_total", site="bathroom", value=watt_hours)
        time.sleep(updateInterval)
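
The loop above depends on helpers that are not shown in this record. The sketch below fills them in under stated assumptions: mqttClient is a paho-mqtt client (1.x-style constructor, as used elsewhere in this record), getEnergyUsage() returns a dict with 'power_mw' and 'total_wh' keys, sentPayload() publishes one reading as JSON on a "<site>/<name>" topic, and updateInterval is the 5-second period from the description. The broker address, topic layout, and payload format are illustrative assumptions, not taken from the original source.

import json
import time

import paho.mqtt.client as mqtt

updateInterval = 5  # seconds between publishes, per the description above

# Hypothetical broker address; replace with the real one.
mqttClient = mqtt.Client(client_id="energy-monitor")
mqttClient.connect("broker.local", 1883, keepalive=60)

def getEnergyUsage():
    # Placeholder reading; a real implementation would query the smart plug or meter.
    return {"power_mw": 1500.0, "total_wh": 42.0}

def sentPayload(name, site, value):
    # Publish one reading as JSON on a topic such as "bathroom/power".
    topic = f"{site}/{name}"
    payload = json.dumps({"value": value, "timestamp": int(time.time())})
    mqttClient.publish(topic, payload, qos=0, retain=False)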
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n #define the callbacks\n mqttc.on_message = on_message\n mqttc.on_connect = on_connect\n mqttc.on_publish = on_publish\n mqttc.on_subscribe = on_subscribe\n \n mqttc.will_set(willtopic, payload=\"offline\", qos=0, retain=True)\n mqttc.reconnect_delay_set(delay=3, delay_max=30, exponential_backoff=True)\n \n try:\n mqttc.connect(\"mqtt.localdomain\", 1883, 60)\n except Exception, e:\n print(\"XBMC MQTT -- MQTT connection failed: %s\" % (str(e)))\n sys.exit(1)\n \n while True:\n try:\n mqttc.loop_forever()\n except socket.error:\n print(\"XBMC MQTT --MQTT server disconnected; sleeping\")\n time.sleep(5)\n xbmc.executebuiltin('Notification(Error, mqtt disconnected pls chk,5000,'+mqtt_logo+')\\'') \n except:\n raise",
"def loop(self) -> None:\n while True:\n # Sleep before running code to ensure that the sensor is\n # initialized on first run, as per the specifications.\n sleep(config.SLEEP)\n\n self.setup_quiet_hours()\n\n if self.quiet_setup:\n if self.in_quiet_hours():\n if config.DC_QH:\n self.sensor.close()\n self.sleep_quiet_hours()\n continue\n\n # In the case that quiet hours were established during first run\n # and removed from the endpoint afterwards, the sensor may not\n # be in the open state. Because serial.Serial.open() may raise\n # an exception if the sensor is already open, just check prior.\n if not self.sensor.isOpen():\n self.sensor.open()\n\n config.LOGGER.info('Woke up after sleeping. Running loop()')\n self.data = []\n for _ in range(10):\n datum = self.sensor.read()\n self.data.append(datum)\n\n for pm, start in self._SLICES.items():\n # Might be necessary to give the endpoint some time\n # between responses\n sleep(10)\n reading = self.read_data_from_bytes(start)\n aq_dict = air_quality.AQS[pm].get_range(reading)\n\n sections = [\n {\n 'type': 'text',\n 'color': aq_dict['color'],\n 'value': f'Quality: {aq_dict[\"label\"]}'\n },\n {\n 'type': 'gauge',\n 'color': [aq_dict['color']],\n 'range': [aq_dict['lower'], aq_dict['upper']],\n 'value': reading,\n },\n {\n 'type': 'gauge',\n 'color': air_quality.COLORS,\n 'range': air_quality.AQS[pm].get_all_ranges(),\n 'value': reading,\n }\n ]\n\n data = {\n 'module': f'photo-dash-sds011-pm{pm}',\n 'title': f'Air Quality - PM{pm}',\n 'sections': sections,\n }\n\n try:\n r = requests.put(config.ENDPOINT, json=data)\n except Exception as e: # Catching broad Exceptions for now\n config.LOGGER.error(e)\n config.LOGGER.info(r.status_code)",
"def run(self):\n data = ''\n while not rospy.is_shutdown():\n if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):\n rospy.logerr(\"Lost sync with device, restarting...\")\n self.requestTopics()\n self.lastsync = rospy.Time.now() \n \n flag = [0,0]\n flag[0] = self.port.read(1)\n if (flag[0] != '\\xff'):\n continue\n flag[1] = self.port.read(1)\n if ( flag[1] != '\\xff'):\n rospy.loginfo(\"Failed Packet Flags \")\n continue\n # topic id (2 bytes)\n header = self.port.read(4)\n if (len(header) != 4):\n #self.port.flushInput()\n continue\n \n topic_id, msg_length = struct.unpack(\"<hh\", header)\n msg = self.port.read(msg_length)\n if (len(msg) != msg_length):\n rospy.loginfo(\"Packet Failed : Failed to read msg data\")\n #self.port.flushInput()\n continue\n chk = self.port.read(1)\n checksum = sum(map(ord,header) ) + sum(map(ord, msg)) + ord(chk)\n\n if checksum%256 == 255:\n if topic_id == TopicInfo.ID_PUBLISHER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n self.senders[m.topic_id] = Publisher(m.topic_name, m.message_type)\n rospy.loginfo(\"Setup Publisher on %s [%s]\" % (m.topic_name, m.message_type) )\n except Exception as e:\n rospy.logerr(\"Failed to parse publisher: %s\", e)\n elif topic_id == TopicInfo.ID_SUBSCRIBER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n self.receivers[m.topic_name] = [m.topic_id, Subscriber(m.topic_name, m.message_type, self)]\n rospy.loginfo(\"Setup Subscriber on %s [%s]\" % (m.topic_name, m.message_type))\n except Exception as e:\n rospy.logerr(\"Failed to parse subscriber. %s\"%e)\n elif topic_id == TopicInfo.ID_SERVICE_SERVER:\n try:\n m = TopicInfo()\n m.deserialize(msg)\n\t\t\tservice = ServiceServer(m.topic_name, m.message_type, self)\n self.receivers[m.topic_name] = [m.topic_id, service]\n self.senders[m.topic_id] = service\n rospy.loginfo(\"Setup ServiceServer on %s [%s]\"%(m.topic_name, m.message_type) )\n except:\n rospy.logerr(\"Failed to parse service server\")\n elif topic_id == TopicInfo.ID_SERVICE_CLIENT:\n pass\n \n elif topic_id == TopicInfo.ID_PARAMETER_REQUEST:\n self.handleParameterRequest(msg)\n \n elif topic_id == TopicInfo.ID_LOG:\n self.handleLogging(msg)\n \n elif topic_id == TopicInfo.ID_TIME:\n t = Time()\n t.data = rospy.Time.now()\n data_buffer = StringIO.StringIO()\n t.serialize(data_buffer)\n self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )\n self.lastsync = rospy.Time.now()\n elif topic_id >= 100: # TOPIC\n try:\n self.senders[topic_id].handlePacket(msg)\n except KeyError:\n rospy.logerr(\"Tried to publish before configured, topic id %d\" % topic_id)\n else:\n rospy.logerr(\"Unrecognized command topic!\")\n rospy.sleep(0.001)",
"def main():\n vpin = thermal_cam_power_on(I2C_POWER_PIN, warm_up_time_s=START_WAIT_SECS)\n arr = array('float', (float() for _ in range(768)))\n probe_buffer = array('float', [float()] * 3)\n gc.collect()\n print(\"Getting cam data\")\n get_thermal_cam_data(arr, vpin, I2C_DATA_PIN, I2C_CLOCK_PIN)\n del vpin\n print(\"Getting temperature probe readings\")\n get_temp_probe_readings(\n PROBES_ONEWIRE_PIN, PROBES_POWER_PIN, list(THERMOMETER_IDS), probe_buffer\n )\n print(\"Connecting to wifi\")\n connect_wifi(*WIFI_CREDS, wait_connect_s=20)\n print(\"Wifi connected\")\n print(\"Synching time via NTP\")\n ntptime.settime()\n print(\"Creating mqtt object\")\n mqtt = get_mqtt(\n AWS_IOT_CLIENT_ID,\n AWS_IOT_HOST,\n AWS_IOT_PORT,\n key_path=\"certs/iot.key\",\n cert_path=\"certs/iot.crt\",\n )\n print(\"MQTT object created\")\n print(\"Connecting to MQTT host\")\n mqtt.connect()\n print(\"MQTT host connected\")\n print(\"Publishing to MQTT topic {}\".format(AWS_IOT_TOPIC))\n probes_dict = {k: v for k, v in zip(thermometer_names, probe_buffer)}\n message = make_json_message(time(), arr, probes_dict)\n print(\"JSON data = {}\".format(message))\n mqtt.publish(AWS_IOT_TOPIC, message, qos=MQTT_QOS)\n print(\"Published\")",
"def main():\n # pylint: disable=too-many-locals\n parser = init_parser()\n options = parser.parse_args()\n\n host = options.host\n port = options.port\n keepalive = options.keepalive\n client_id = options.client_id\n topic = options.topic\n qos = options.qos\n filename = options.file\n interval = options.interval\n min_interval = options.min_interval\n max_interval = options.max_interval\n prompt_to_send = options.prompt_to_send\n\n client = mqtt.Client(client_id)\n client.on_connect = on_connect\n client.on_disconnect = on_disconnect\n client.on_publish = on_publish\n client.on_log = on_log\n client.connect(host, port, keepalive)\n client.loop_start()\n\n publish_time = 0\n\n with open(filename) as file_object:\n message = file_object.readline().rstrip()\n while message:\n interval = random.randint(min_interval, max_interval)\n current_time = int(time.time() + 0.5)\n used_time = current_time - publish_time\n if used_time < interval:\n time.sleep(interval - used_time)\n\n publish_time = int(time.time() + 0.5)\n message = message.replace(\"{DATETIME}\", str(publish_time))\n if prompt_to_send:\n print(\"press enter to send next message.\")\n if PY2:\n raw_input() # (only a python 3 error) pylint: disable=undefined-variable\n else:\n input()\n mqtt_message_info = client.publish(topic, message, qos=qos)\n print(\"Publish: %s has return code %i, %s\" % (mqtt_message_info.mid, mqtt_message_info.rc, mqtt.error_string(mqtt_message_info.rc)))\n\n if mqtt_message_info.rc != mqtt.MQTT_ERR_SUCCESS:\n raise ValueError(mqtt.error_string(mqtt_message_info.rc))\n\n if not mqtt_message_info.is_published():\n print(\"Waiting for publish.\")\n mqtt_message_info.wait_for_publish()\n\n message = file_object.readline().rstrip()\n\n client.disconnect()\n print(\"Done\")",
"def run_mainloop():\n \n # List of sensor node addresses expected to be in the network\n node_addr = [2, 3]\n \n # Data gathering time parameters [Frequenct data gathering and network discovery for testing]\n data_gathering_interval = 60*1000 # [msec]\n net_discovery_interval = 3 # (re)do network discovery every 3 cycles \n \n # Enable the NM3 power supply on the powermodule\n powermodule = PowerModule()\n powermodule.enable_nm3()\n\n # Enable power supply to 232 driver\n pyb.Pin.board.EN_3V3.on()\n pyb.Pin('Y5', pyb.Pin.OUT, value=0) # enable Y4 Pin as output\n max3221e = MAX3221E(pyb.Pin.board.Y5)\n max3221e.tx_force_on() # Enable Tx Driver\n \n # Wait for 6 seconds to let the modem start up\n print(\"6 second delay to allow the NM3 to boot up...\")\n pyb.delay(6*1000)\n\n # Initialize UART and NM3 object\n uart = machine.UART(1, 9600, bits=8, parity=None, stop=1, timeout=1000)\n modem = Nm3(uart)\n\n # Create and initialize the network protocol object\n net_protocol = gw_node.NetProtocol()\n net_protocol.init(modem, node_addr)\n \n # Start by doing the network discovery and setup\n net_protocol.do_net_discovery()\n net_protocol.setup_net_schedule() # guard interval [msec] can be specified as function input (default: 500)\n \n # Extract network topology and schedule information as JSON\n net_info = net_protocol.get_net_info_json()\n print(net_info) # print it in this test script (send over Wi-Fi in the real app)\n\n # Start an infinite loop, gathering sensor data\n cycle_counter = 0\n while True:\n \n # Perform a cycle of data gathering\n cycle_counter += 1\n frame_start_time = utime.ticks_ms() # update the frame start time\n stay_awake = (cycle_counter == net_discovery_interval) # if this is the last cycle before network re-discovery\n time_till_next_frame = data_gathering_interval # for sleep synchronisation (this can also be variable between frames)\n packets = net_protocol.gather_sensor_data(time_till_next_frame, stay_awake)\n # A list of MessagePacket objects is returned, to be processed and transmitted over Wi-Fi\n \n # If this was the last cycle before network re-discovery\n if (cycle_counter == net_discovery_interval):\n # Do network discovery and setup again (the network should be awake now)\n net_protocol.do_net_discovery()\n net_protocol.setup_net_schedule()\n cycle_counter = 0\n # When finished, instruct the network to sleep until the next frame\n time_till_next_frame = data_gathering_interval - utime.ticks_diff(utime.ticks_ms(), frame_start_time)\n net_protocol.set_network_to_sleep(time_till_next_frame)\n # Extract network topology and schedule information as JSON\n net_info = net_protocol.get_net_info_json()\n print(net_info) # print it in this test script (send over Wi-Fi in the real app)\n\n # Go to sleep yourself until the start of next frame\n # [This will need to be replaced by a proper sleep mode (with powered down modules)]\n time_till_next_frame = data_gathering_interval - utime.ticks_diff(utime.ticks_ms(), frame_start_time)\n pyb.delay(time_till_next_frame)",
"def on_connect(self, client, userdata, flags, rc):\n# client.subscribe(\"power_meter/status/#\")\n client.subscribe(self.mqtt_topic_status)\n client.subscribe(self.mqtt_topic_electricity + '/#')\n client.subscribe(self.mqtt_topic_gas + '/#')\n client.subscribe(self.mqtt_topic_water + '/#')\n self.mqtt_client.publish(self.mqtt_topic_last_will, \"online, \" + str(self.dconn), qos=0, retain=True)\n self.connected = True\n self.log.warning(\"Connected with result code: \" + str(rc))\n self.log.info(\"Connected to: \" + MQTT_SERVER)",
"def keepalive_ws(self):\n #TODO: should the data coming from the arduino be converted here? or in the master?\n # pro: master does not need to know about the details of the acquisition\n # con: more data sent through the websocket\n\n ################################\n # CONNECT AND LISTEN TO MASTER\n ################################\n #Structure of the \"listen\" loop:\n # - Read message\n # - Convert to arduino command\n # - Connect to arduino using the command\n # - Receive data from arduino and send back to master # Initialises the error state\n\n while not self.is_master_connected:\n try:\n print(\"(node {}) Connecting to WS connection = {} \"\\\n .format(time.strftime(TFORMAT),\n self.location))\n self.master_server = yield tornado.websocket.websocket_connect(self.location)\n print('(node {}) Connection with master server started'\\\n .format(time.strftime(TFORMAT)))\n self.is_master_connected = True\n except socket.error as error:\n if error.errno == 10061:\n print('\\n(node {}) Connection refused by host. \\\n Maybe it is not running? Waiting'.format(time.strftime(TFORMAT)))\n time.sleep(5)\n self.is_master_connected = False\n self.metadata_registered = False\n except HTTPError as error:\n print('(node {}) Connection taking quite long... '\\\n .format(time.strftime(TFORMAT)))\n\n\n #Main loop for data acquisition/sending\n #Acquire data from master\n while self.is_master_connected :\n try:\n if not self.metadata_registered:\n self.master_server.write_message(json.dumps(self.metadata_dict))\n self.metadata_registered=True\n\n msg = yield self.master_server.read_message() #we may use a callback here, instead of the rest of this code block\n except UnboundLocalError:\n print('\\n(node) Connection refused by host. Maybe Master server is not running?')\n self.is_master_connected = False\n self.metadata_registered = False\n raise HostConnectionError\n\n #Process data:\n if msg is not None:\n try:\n self.message_bridging_arduino(msg)\n\n # Sometimes the Arduino disconnectis, throwing a SerialException. We handle this and let the master server know\n # there is an error.\n except (SerialException, ArduinoConnectionError):\n # If the connection is not accessible, send a \"standard\" dictionary, with the 'error' flag\n self.send_message_on_serial_exception()\n print('(node {}) Serial Exception '.format(time.strftime(TFORMAT)))\n if self.is_arduino_connected:\n self.is_arduino_connected = False\n self.arduino_COMS.cleanup()\n self.reconnect_to_arduino()\n except ValueError as err:\n print('(node {}) ValueError thrown'.format(time.strftime(TFORMAT)))\n print(err.args)\n\n except RuntimeError as err:\n if err.args[0]=='generator raised StopIteration':\n print('(node) Cannot find arduino connection')\n else:\n raise err\n except KeyboardInterrupt:\n self.is_master_connected=False\n self.metadata_registered = False\n raise\n\n else:\n print('(node) Could not retrieve message from server. It may be disconnected.')\n self.is_master_connected = False\n self.metadata_registered = False\n #raise KeyboardInterrupt",
"def main(waiting_time = seconds):\n key = 'YOUR-API-KEY-HERE'\n messages = []\n #my_client = WebSocketClient(STOCKS_CLUSTER, key, my_custom_process_message(messages))\n my_client = WebSocketClient(CRYPTO_CLUSTER, key, my_custom_process_message(messages))\n #my_client = WebSocketClient(FOREX_CLUSTER, key, my_custom_process_message(messages))\n my_client.run_async()\n\n #my_client.subscribe(\"T.MSFT\", \"T.AAPL\", \"T.AMD\", \"T.NVDA\") # Stock data\n my_client.subscribe(\"XA.BTC-USD\", \"XA.ETH-USD\", \"XA.LTC-USD\") # Crypto data\n #my_client.subscribe(\"C.USD/CNH\", \"C.USD/EUR\") # Forex data\n time.sleep(waiting_time)\n\n my_client.close_connection()\n\n df = pd.DataFrame(messages)\n\n df = df.iloc[5:, 0].to_frame()\n df.columns = [\"data\"]\n df[\"data\"] = df[\"data\"].astype(\"str\")\n\n df = pd.json_normalize(df[\"data\"].apply(lambda x : dict(eval(x))))\n\n # export data to sqlite\n with sqlite3.connect(\"realtime_crypto.sqlite\") as conn:\n df.to_sql(\"data\", con=conn, if_exists=\"append\", index=False)",
"def pingEsp(self):\n\t\twhile True:\n\t\t\tprint (\"[{}] Keeping alive the ESP8266 connection\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\tmqttCli.publish(\"ping\", mqttJsonDump('void'))\n\t\t\ttime.sleep(30)",
"def run(self):\n while True:\n try:\n sleep(influx_settings.write_freq)\n self._redis2influx()\n except InterruptExceptions as ex:\n raise ex",
"def run():\n # 1 sec delay to allow DHT22 sensor to start as per datasheet\n sleep_ms(1000)\n last_run = ticks_ms()\n _read()\n\n while True:\n if ticks_diff(ticks_ms(), last_run) > _READING_DELAY_MS:\n last_run = ticks_ms()\n _read()\n\n _signal_alive()\n sleep_ms(1000)",
"def _recv_thread_func(self):\r\n use_ssl = self.config.get_bool(\"gox\", \"use_ssl\")\r\n wsp = {True: \"wss://\", False: \"ws://\"}[use_ssl]\r\n while not self._terminating: #loop 0 (connect, reconnect)\r\n try:\r\n url = \"%s%s/socket.io/1\" % (wsp, self.hostname)\r\n\r\n # subscribing depth and ticker through the querystring,\r\n # the trade and lag will be subscribed later after connect\r\n sym = \"%s%s\" % (self.curr_base, self.curr_quote)\r\n if not FORCE_NO_DEPTH:\r\n querystring = \"Channel=depth.%s/ticker.%s\" % (sym, sym)\r\n else:\r\n querystring = \"Channel=ticker.%s\" % (sym)\r\n self.debug(\"### trying Socket.IO: %s?%s ...\" % (url, querystring))\r\n self.socket = SocketIO()\r\n self.socket.connect(url, query=querystring)\r\n\r\n self._time_last_received = time.time()\r\n self.connected = True\r\n self.debug(\"### connected\")\r\n self.socket.send(\"1::/mtgox\")\r\n\r\n self.debug(self.socket.recv())\r\n self.debug(self.socket.recv())\r\n\r\n self.debug(\"### subscribing to channels\")\r\n self.channel_subscribe()\r\n\r\n self.debug(\"### waiting for data...\")\r\n self.signal_connected(self, None)\r\n while not self._terminating: #loop1 (read messages)\r\n msg = self.socket.recv()\r\n self._time_last_received = time.time()\r\n if msg == \"2::\":\r\n #self.debug(\"### ping -> pong\")\r\n self.socket.send(\"2::\")\r\n continue\r\n prefix = msg[:10]\r\n if prefix == \"4::/mtgox:\":\r\n str_json = msg[10:]\r\n if str_json[0] == \"{\":\r\n self.signal_recv(self, (str_json))\r\n\r\n except Exception as exc:\r\n self.connected = False\r\n self.signal_disconnected(self, None)\r\n if not self._terminating:\r\n self.debug(\"### \", exc.__class__.__name__, exc, \\\r\n \"reconnecting in 1 seconds...\")\r\n self.socket.close()\r\n time.sleep(1)",
"def send_mqtt(self, data_type, data):\n try:\n client = mqtt.Client(\"rpi1_qnas\")\n client.on_connect = self.on_connect\n client.on_message = self.on_message\n client.connect(MQTT_BROKER_ADDRESS)\n client.loop_start()\n client.publish(MQTT_TOPIC + \"/{}\".format(data_type), data)\n client.disconnect()\n client.loop_stop()\n except Exception:\n msg = \"{} \\nMQTT error\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.loggers[\"log_stdout\"].warning(msg)\n self.loggers[\"log_errors\"].warning(msg)\n self.verbose(msg)",
"def monitoring():\n\n logging.info(\"!!! Engine start !!! {}\".format(time.strftime(\"%d.%m.%y %H:%M\")))\n\n try_connect = 0\n initialization()\n while True:\n try:\n for vrt, host in host_vrt.items():\n answer = subprocess.call(['ping', '-c', '3', vrt])\n if answer != 0:\n collection()\n time.sleep(15)\n try_connect += 1\n logging.info(\"!!! Try firs reconnection {} !!!\".format(time.strftime(\"%d.%m.%y %H:%M\")))\n if try_connect == 2:\n vrt_unreachable.append(vrt)\n with app.app_context():\n alert()\n if try_connect >= 3:\n for vm, cmd in host_cmd_vmid.items():\n if vm == vrt:\n ssh_cli(SSHClient(host, port, user, password), cmd)\n try_connect = 0\n successful_autostart()\n\n\n else:\n continue\n\n except TimeoutError:\n print('Connection timed out')\n logging.info(\"SSH Connection time out {}\".format(time.strftime(\"%d.%m.%y %H:%M\")))\n\n except paramiko.ssh_exception.NoValidConnectionsError:\n print('NoValidConnectionsError')\n bad_autostart()",
"def publish():\n try:\n # All circles messages are generated together and before sending data\n # via MQTT. This way sending data overhead is ignored and we expect\n # similar timestamps between all circles.\n messages = []\n for i,c in enumerate(circles):\n config = circles_config[i]\n t = time.time()\n mac = config[\"mac\"]\n name = config[\"name\"]\n last_powers = [ config[\"power\"+x[\"suffix\"]] for x in OUTPUT_LIST ]\n last_state = config[\"state\"]\n try:\n reading = c.get_power_usage()\n powers = [ reading[x[\"key\"]] for x in OUTPUT_LIST ]\n state = c.get_info()['relay_state']\n alert_below_th = config.get(\"alert_below_threshold\", None)\n for i,p in enumerate(powers):\n p = max(0, p)\n key = OUTPUT_LIST[i][\"key\"]\n suffix = OUTPUT_LIST[i][\"suffix\"]\n if alert_below_th is not None and p < alert_below_th:\n logger.alert(\"Value %f %s for circle %s registered with name %s is below threshold %f\",\n float(p), suffix, mac, name, float(alert_below_th))\n if Utils.compute_relative_difference(last_powers[i], p) > config.get(\"tolerance\",DEFAULT_POWER_TOLERANCE) or t - config[\"when\"+suffix] > MAX_TIME_BETWEEN_READINGS:\n usage_message = { 'timestamp' : t, 'data': p }\n messages.append( (topic.format(name, \"power\"+suffix, mac), usage_message) )\n config[\"power\"+suffix] = p\n config[\"when\"+suffix] = t\n # check state transition before message is appended\n if state != last_state:\n state_message = { 'timestamp' : t, 'data' : state }\n messages.append( (topic.format(name, \"state\", mac), state_message) )\n config[\"state\"] = state # track current state value\n except:\n print \"Unexpected error:\", traceback.format_exc()\n logger.info(\"Error happened while processing circles data: %s\", traceback.format_exc())\n for top,message in messages:\n client.publish(top, json.dumps(message))\n except:\n print \"Unexpected error:\", traceback.format_exc()\n logger.error(\"Error happened while processing circles data\")\n raise",
"def start(self):\n\n while True:\n measurement = self.generate_message()\n measurement.save()\n print(\"Storing new measurement\")\n time.sleep(10)",
"def online_check(self):\n self.online = False\n online_topic = '{t_topic}/INFO2'.format(**self)\n print('{BLUE}Watching for {}{NC}'.format(online_topic, **colors))\n try:\n self.mqtt.connect(self.mqtt_host)\n except Exception:\n print('MQTT broker not online')\n return False\n\n self.mqtt.message_callback_add(online_topic, lambda *args: \\\n setattr(self, 'online', True))\n self.mqtt.subscribe(online_topic)\n startTime = dt.datetime.now()\n while not self.online and not too_old(startTime, wait_time):\n self.mqtt.loop(timeout=loop_time)\n time_waited = (dt.datetime.now() - startTime).total_seconds()\n # If we did not see device publish INFO2, sometimes platformio causes\n # a delay by checking for updates and we miss seeing this message.\n # To check for that case, query the device for its build timestamp and\n # check if it was built in the last couple minutes.\n if not self.online:\n self.query_tas_status()\n if 'build_time' in self.reported:\n build_time = dt.datetime.strptime(self.reported['build_time'],\n '%Y-%m-%dT%H:%M:%S')\n if dt.datetime.now() - build_time < dt.timedelta(minutes=2):\n self.online = True\n\n if not self.online:\n print('{RED}{f_name} did not come online within {wait_time} '\n 'seconds{NC}'.format(f_name=self.f_name,\n wait_time=str(wait_time),\n **colors))\n elif self.online:\n print('{GREEN}{f_name} came online in {time_waited} '\n 'seconds{NC}'.format(f_name=self.f_name,\n time_waited=time_waited,\n **colors))\n self.mqtt.unsubscribe(online_topic)\n self.mqtt.message_callback_remove(online_topic)\n self.mqtt.disconnect()\n return self.online",
"def main():\n\tconnected = False\n\t# Get the values from the config file\n\tconfig = read_config()\n\t# Do the infinite(ish) internet check loop\n\twhile True:\n\t\tconnected = check_connection(connected, config)\n\t\ttime.sleep(30)",
"def acquisition(self):\n\t\twhile True:\n\t\t\tself.humidity, self.temperature = Adafruit_DHT.read_retry(SENSOR, PIN)\n\t\t\tprint (\"[{}] New measures from the Adafruit DHT:\\n\\tTemperature: {}C\\n\\tHumidity: {}%\".format(\n\t\t\t\tint(time.time()),\n\t\t\t\tself.temperature,\n\t\t\t\tself.humidity\n\t\t\t))\n\t\t\tmqttCli.publish(\"measure/temperature\", mqttJsonDump(self.temperature))\n\t\t\tmqttCli.publish(\"measure/humidity\", mqttJsonDump(self.humidity))\n\t\t\t\n\t\t\tself.updatePendingJson(\"humidity\", self.humidity, \"data\")\n\t\t\tself.updatePendingJson(\"temperature\", self.temperature, \"data\")\n\t\t\t\n\t\t\tr=req.get('http://localhost:9090/interacquisition')\n\t\t\tr = r.content\n\t\t\tr = json.loads(r)\n\t\t\tdelta_t = r[\"interacquisition\"]*60\n\t\t\t\n\t\t\tprint (\"[{}] Interacquisition time retrieved from the Room Catalog\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\t\n\t\t\ttime.sleep(delta_t)",
"def __sendHeartbeat(self):\n \n while not rospy.is_shutdown():\n rospy.sleep(5)\n self.setOutput(self.write_start+1,0)",
"def master(): # count = 5 will only transmit 5 packets\r\n\r\n # for the \"HandlingData\" part of the test from the TMRh20 library example\r\n float_value = 0.01\r\n while 1:\r\n nrf.listen = False # ensures the nRF24L01 is in TX mode\r\n print(\"Now Sending\")\r\n start_timer = int(time.monotonic() * 1000) # start timer\r\n # use struct.pack to packetize your data into a usable payload\r\n # '<' means little endian byte order.\r\n # 'L' means a single 4 byte unsigned long value.\r\n # 'f' means a single 4 byte float value.\r\n buffer = struct.pack('<Lf', start_timer, float_value)\r\n result = nrf.send(buffer)\r\n if not result:\r\n print('send() failed or timed out')\r\n time.sleep(.05)",
"def main():\n\n # Run until we get a KeyboardInterrupt\n while True:\n logging.info(\"Checking for new data from Awair\")\n\n # Setup a GraphQL client to connect to Awair\n awairClient = GraphQLClient(os.getenv('AWAIR_ENDPOINT'))\n awairClient.inject_token(\"Bearer \" + os.getenv('AWAIR_TOKEN'))\n\n # Setup an InfluxDB client\n influxClient = InfluxDBClient(host=os.getenv('INFLUXDB_HOST'), port=os.getenv('INFLUXDB_PORT'), username=os.getenv('INFLUXDB_USERNAME'), password=os.getenv('INFLUXDB_PASSWORD'), database=os.getenv('INFLUXDB_DATABASE'))\n\n # Setup our Awair and Influx connectors and inject the clients\n awairConnector = AwairConnector(awairClient)\n influxConnector = InfluxConnector(influxClient)\n\n # Find any devices\n devices = awairConnector.get_devices()\n logging.info(\"Found %s Awair devices\", len(devices))\n\n # Check each device\n for device in devices:\n logging.info(\" - Looking up data for device %(name)s with UUID %(uuid)s\", device)\n\n # Find the last data we have for it, if any, in InfluxDB\n last_data_at = influxConnector.get_last_recorded_time(device)\n\n # Fetch sensor readings from Awair\n samples = awairConnector.get_sensor_readings(device, last_data_at)\n\n # Add samples to InfluxDB\n influxConnector.add_samples(device, samples)\n\n # TODO: we should calculate the next time there will be data\n wait_for_seconds = 15 * 60\n\n logging.info(\"Checking again in %s seconds...\", wait_for_seconds)\n\n # Sleep until we need to do another check\n time.sleep(wait_for_seconds)",
"def _run(self):\n #print(\"try to connect run\")\n while True:\n self._connect()\n while not self.connected and self.auto_retry is not None:\n gevent.sleep(self.auto_retry)\n self._connect()\n if self.connected:\n self.run()\n if self.auto_retry is None:\n break",
"def run(self):\n while True:\n self.current_wifi_clients()\n self._eval_is_someone_home()\n time.sleep(self._interval)",
"def check_market(self):\n while True:\n logger.info(\"Esperando para nueva conexion...\")\n time.sleep(60)\n for client in self.clients:\n\n if self.clients[client][1]:\n\n notifications = self.clients[client][0].get_notifications()\n\n if notifications:\n for notification in notifications:\n self.send_message(notification, chat_id=client)\n\n else:\n pass",
"def run(self):\n self.startSerial()\n # Wait about five seconds before doing anything\n time.sleep(5)\n while True:\n # Check setpoints against all controllers\n self.check_setpoints()\n\n # Issue any new commands as necessary\n self.check_pins()\n\n # Receive the latest Arduino data and process into dictionary\n self.read_arduino_data_and_format_dictionary()\n\n # Clean all of the arduino stuff to avoid incorrect inputs\n with self.lock:\n self.ser.reset_output_buffer()\n with self.lock:\n self.ser.reset_input_buffer()",
"async def exchanges_message_handler(bnc_websocket, ftx_websocket, param) -> None:\n\n ok = True\n while ok:\n try:\n # receiving updates\n bnc = await bnc_websocket.recv()\n ftx = await ftx_websocket.recv()\n # translate to execute strategy\n await price_analyze(json.loads(bnc), json.loads(ftx), param['p_d'], param['m'])\n # sleep if its needed\n await asyncio.sleep(param['r_r'])\n\n except ConnectionClosed:\n print('Connection Closed. Need to reboot.')\n ok = False",
"async def main():\n data_file = open(\"data_file_nefit2.txt\", \"r\")\n data = data_file.read().splitlines()\n loop = asyncio.get_event_loop()\n BoschGateway = bosch.gateway_chooser(device_type=NEFIT)\n gateway = BoschGateway(session=loop,\n session_type=XMPP,\n host=data[0],\n access_token=data[1],\n password=data[2],\n nefit_connector=NefitConnector2)\n # gateway = BoschGateway(session=loop,\n # session_type=\"xmpp\",\n # host=data[0],\n # access_key=data[1],\n # password=data[2])\n print(await gateway.custom_test())\n # await gateway.initialize()\n # return\n # print(f\"UUID {await gateway.check_connection()}\")\n\n # small = await gateway.smallscan(DHW_CIRCUITS)\n# myjson = json.loads(small)\n # print(small)\n # return\n sensors = gateway.initialize_sensors()\n for sensor in sensors:\n await sensor.update()\n for sensor in sensors:\n print(f\"{sensor.name} : {sensor.state}\")\n await gateway.get_capabilities()\n for hc in gateway.heating_circuits:\n await hc.update()\n print(\"hvac mode\", hc.ha_mode)\n print(\"target temp ->\", hc.target_temperature)\n return\n \n# await hc.set_ha_mode(\"auto\") #MEANS AUTO\n# await hc.update()\n # time.sleep(4)\n await dhw.set_temperature(53.0)\n # return\n # return\n # await dhw.set_ha_mode(\"performance\") #MEANS MANUAL\n return\n # print(\"target in manual\", hc.target_temperature)\n # print(\"ha mode in manual\", hc.ha_mode)\n # await hc.update()\n # print(\"target after update\", hc.target_temperature)\n # print(\"ha mode\", hc.ha_mode)\n\n # await hc.set_ha_mode(\"auto\") #MEANS AUTO\n # print(\"target after auto without update\", hc.target_temperature)\n # print(\"ha mode\", hc.ha_mode)\n\n # return\n # print(await hc.set_temperature(10.0))\n # print(\"ustawiona!\")\n dhws = gateway.dhw_circuits\n dhw = dhws[0]\n await dhw.update()\n print(\"START1\")\n print(dhw.target_temperature)\n print(\"START2\")\n print(dhw.current_mode)\n print(dhw.target_temperature)\n \n return\n print(\"START3\")\n print(dhw.target_temperature)\n return\n # print(hc.schedule)\n print(gateway.get_info(DATE))\n # print(await gateway.rawscan())\n #print(hc.schedule.get_temp_for_date(gateway.get_info(DATE)))\n return\n aa=0\n while aa < 10:\n time.sleep(1)\n await hc.update()\n print(hc.target_temperature)\n aa = aa+1\n \n await hc.set_operation_mode(\"auto\")\n\n aa=0\n while aa < 10:\n time.sleep(1)\n await hc.update()\n print(hc.target_temperature)\n aa = aa+1\n\n # print(gateway.get_property(TYPE_INFO, UUID))\n await loop.close()",
"def connect(self):\n self.log.info(u\"==> Connecting to MQTT broquer ...\")\n try:\n self.MQTTClient.connect(self.mqtthost, int(self.mqttport), 60)\n self.log.info(u\"==> Connected on MQTT broquer\")\n self.MQTTClient.loop_start() # This will automatically reconnect if connection is lost.\n except:\n error = u\"### Error while connecting to MQTT broquer : %s \" % str(traceback.format_exc())\n raise MQTTException(error)"
] |
[
"0.6838484",
"0.6585376",
"0.6341815",
"0.6324926",
"0.63036394",
"0.6286901",
"0.6280231",
"0.6225539",
"0.61884284",
"0.6188228",
"0.615399",
"0.61532027",
"0.6128408",
"0.6082001",
"0.60575485",
"0.60568744",
"0.6038828",
"0.6026853",
"0.60169464",
"0.60107285",
"0.5966759",
"0.59442997",
"0.59381205",
"0.59379995",
"0.59371865",
"0.5936623",
"0.5933079",
"0.59199834",
"0.591384",
"0.5904316"
] |
0.6958831
|
0
|