query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (listlengths 30-30) | negative_scores (listlengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Return boolean of the "markdown_convert" option. | def get_opt_markdown_convert(self, command):
if "markdown_convert" in self.command_dict["commands"][command].keys():
return self.command_dict["commands"][command]["markdown_convert"]
else:
return CommandDict.DEFAULT_OPT_MARKDOWN_CONVERT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_markdown(self):\n return self._tag == 'markdown'",
"def is_markdown(self):\n return self._tag == 'markdown'",
"def isMarkdown(self):\n return self.__isMarkdown",
"def is_markdown_cell(cell):\n return cell[\"cell_type\"] == \"markdown\"",
"def convert_to_markdown(self, text: str) -> str:",
"def can_markdown(repo, fname):\n if markdown is None:\n return False\n\n if not repo.info.embed_markdown:\n return False\n\n return fname.endswith(\".md\")",
"def quick_set_pandoc_markdown_settings(self):\n self.logger.debug(\"Pandoc markdown conversion settings\")\n self.quick_setting = 'pandoc_markdown'\n self.export_format = 'pandoc_markdown'\n self.front_matter_format = 'yaml'\n self.metadata_schema = []\n if self.conversion_input == 'nsx':\n self.metadata_schema = ['title', 'ctime', 'mtime', 'tag']\n self.spaces_in_tags = False\n self.split_tags = False\n self.first_row_as_header = True\n self.first_column_as_header = True\n self.chart_image = True\n self.chart_csv = True\n self.chart_data_table = True",
"def markdown(value, args=''):\n import markdown\n\n extensions = [e for e in args.split(',') if e]\n if len(extensions) > 0 and extensions[0] == \"safe\":\n extensions = extensions[1:]\n safe_mode = True\n else:\n safe_mode = False\n\n return mark_safe(markdown.markdown(\n force_text(value),\n extensions=extensions,\n safe_mode=safe_mode,\n enable_attributes=(not safe_mode)\n ))",
"def enml_to_markdown(enml):\n pass",
"def quick_set_pandoc_markdown_strict_settings(self):\n self.logger.debug(\"Pandoc Markdown Strict Setting conversion settings\")\n self.quick_setting = 'pandoc_markdown_strict'\n self.export_format = 'pandoc_markdown_strict'\n self.metadata_schema = []\n if self.conversion_input == 'nsx':\n self.metadata_schema = ['title', 'ctime', 'mtime', 'tag']\n self.spaces_in_tags = False\n self.split_tags = False\n self.first_row_as_header = True\n self.first_column_as_header = True\n self.chart_image = True\n self.chart_csv = True\n self.chart_data_table = True",
"def fixture_ext_markdown(plugin):\n return plugin.md",
"def htmlForMarkdown(md):\n return mdProcessor.convert(md)",
"def markdown(text, *args, **kwargs):\n md = StMarkdown(*args, **kwargs)\n return md.convert(text)",
"def is_markdown_solution_cell(cell):\n if not is_markdown_cell(cell):\n return False\n source = get_source(cell)\n return any([re.match(MD_SOLUTION_REGEX, l, flags=re.IGNORECASE) for l in source])",
"def markdown(value):\n return Markup(md(value))",
"def setup_md_to_html_converter(self):\n templates = self.load_template_files()\n extensions = [\n \"markdown.extensions.fenced_code\",\n \"markdown.extensions.codehilite\",\n \"markdown.extensions.sane_lists\",\n \"markdown.extensions.tables\",\n mdx_math.MathExtension()\n ]\n settings = {\n \"add_default_interactive_thumbnails_to_required_files\": False,\n }\n self.converter = Verto(\n html_templates=templates,\n extensions=extensions,\n settings=settings\n )",
"def markdown_converter(links_to_convert, root=\"./\"):\n \n def to_markdown(element):\n \"\"\"This is a version of `safe_html_to_markdown` with link conversion baked in.\n \n NB links will all start with \"\"\" + root + \"\"\".\n \"\"\"\n return safe_html_to_markdown(element, \n links_to_convert={k:root + v \n for k, v in links_to_convert.items()})\n return to_markdown",
"def needsProcessing(self):\n return self.isMarkdown() or self.hasMetadata()",
"def is_md(self):\n return self.name.startswith('MD')",
"def markdown(text, **kwargs):\n import markdown\n return markdown.markdown(text, **kwargs)",
"def test_description_markdown_with_custom_options() -> None:\n soup = generate_case(\n \"description_markdown\",\n GenerationConfiguration(\n markdown_options={\n \"cuddled-lists\": True,\n }\n ),\n )\n\n assert (\n str(soup.find(\"span\", class_=\"description\"))\n == \"\"\"<span class=\"description\"><p>DOC </p> <ul> <li>List 1</li> <li>List 2</li> </ul> </span>\"\"\"\n )",
"def convert(self,message):\n \n content_type = message.get('content',{}).get('@type','')\n if content_type in self.supported:\n result = getattr(self.tconv, content_type)(message)\n else:\n return False\n \n return result",
"def markdown(s):\n md = markdown_module.Markdown(MARKDOWN_EXTENSIONS, safe_mode='remove')\n return mark_safe(md.convert(s))",
"def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands",
"def html(self):\n return self.report_format in ['pdf', 'html']",
"def markdown(self, text):\n\n # Remove rel attributes as they are not supported by html2markdown\n text = re.sub(r' rel=\".+?\">', \">\", text)\n\n # Convert html to markdown\n text = html2markdown.convert(text)\n\n # Decode [<>&] characters\n text = text.replace(\"<\", \"<\").replace(\">\", \">\").replace(\"&\", \"&\")\n\n # Wrap as Rich Markdown\n return Markdown(text)",
"def render_markdown(text):\n return markdown(text, **MARKDOWN_KWARGS)",
"def on_page_markdown(self, markdown, page, config, files):\n listext = self.config['ext']\n src_file_path = page.file.abs_src_path\n prepath, ext = os.path.splitext(src_file_path)\n lang = ext.lstrip('.')\n filename = page.file.name\n if ext in listext:\n new_markdown = \"# {0}\\n\\n```{1}\\n\".format(filename, lang) + markdown + \"\\n```\"\n return new_markdown\n else:\n return markdown",
"def markdown(text):\n text = gfm(text)\n text = markdown_lib.markdown(text)\n return text",
"def convert_md(md_file, output_file, contents, numbered):\n\n\tscript = ['pandoc', '-s', md_file, '-o', output_file]\n\tscript += ['-c', os.path.join(SCRIPT_DIR, 'themes', 'base.css')]\n\tscript += ['-B', os.path.join(SCRIPT_DIR, 'themes', 'header.html')]\n\n\t# Check the markdown to see if we need to include MathJax\n\tmaths = False if re.search('\\\\n\\\\$\\\\$(.*?)\\\\$\\\\$\\\\n', read_file(md_file),\n\t flags=re.MULTILINE | re.DOTALL) is None else True\n\n\tif numbered:\n\t\tscript.append('--number-sections')\n\n\tif contents:\n\t\tscript.append('--toc')\n\n\tif maths:\n\t\tscript.append('--mathjax')\n\n\tscript += ['--self-contained', '--highlight-style=haddock']\n\n\twith cd(os.path.dirname(md_file)):\n\t\tprint('Converting %s to %s using Pandoc...' % (os.path.basename(md_file), os.path.basename(output_file)))\n\t\tcheck_output(script) # Runs the script on the OS and raises an exception on failure\n\n\tinclude_fonts(output_file) # Include Google fonts\n\tif contents or maths:\n\t\tinclude_js(output_file, maths)\n\t\tadd_contents(output_file)"
]
| [
"0.70488286",
"0.70488286",
"0.70230716",
"0.6086967",
"0.60160255",
"0.5977364",
"0.5914026",
"0.5822783",
"0.5729446",
"0.5702347",
"0.56494474",
"0.5415037",
"0.5336228",
"0.5286717",
"0.52686924",
"0.52682716",
"0.52588314",
"0.525169",
"0.51487887",
"0.50701106",
"0.50670797",
"0.505952",
"0.5057718",
"0.5055183",
"0.5051163",
"0.50455844",
"0.5030135",
"0.5015792",
"0.49983564",
"0.49756494"
]
| 0.785019 | 0 |
Return boolean of the "formatted" option. | def get_opt_formatted(self, command):
if "formatted" in self.command_dict["commands"][command].keys():
return self.command_dict["commands"][command]["formatted"]
else:
return CommandDict.DEFAULT_OPT_FORMATTED | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT",
"def _format_bool_(value):\n\n from ocgis.util.helpers import format_bool\n\n return format_bool(value)",
"def post_formatter(self, value):\n if isinstance(value, bool):\n return value and 'true' or None\n return value",
"def conditional_formattings(self):\n return self.container['conditional_formattings']",
"def format_field_with_flag(self, data):\n return data.strip() == '*'",
"def format_bool(b):\n return \"YES\" if b else \"NO\"",
"def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))",
"def getopt_format(self):\n self._print_enum_opt(\"format\", FORMATTERS)",
"def formatted(self) -> str:\r\n ...",
"def hasOption(self, *args):\n return _libsbml.ConversionProperties_hasOption(self, *args)",
"def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]",
"def format(self):\n return self._format",
"def format(self):\n return self.getparam(\"FORMAT\")",
"def format(self):\n return self.getparam(\"FORMAT\")",
"def html(self):\n return self.report_format in ['pdf', 'html']",
"def is_valid(self, value) -> 'True | str':\n if not value in self.options:\n return f'The value \"{value}\" must one from \"{self.options}\".'\n return True",
"def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)",
"def bool_validator_advice(validator_args):\n \n return \" {True, False}\"",
"def format( self ) :\n\n return( self.__format )",
"def __format__(self, format_specification=''):\n return super().__format__(format_specification=format_specification)",
"def _is_option(line):\n return '=' in line",
"def get_short_flag(self):\n return self.short_flag",
"def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)",
"def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)",
"def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")",
"def option_bool(argument: Optional[str]) -> bool:\n if argument and argument.strip():\n output = tinydocutils.directives.choice(argument, (\"true\", \"false\"))\n return output == \"true\"\n return True",
"def format(self) -> str:",
"def format(self):\n return self[\"format\"]",
"def format(self):\n return self[\"format\"]",
"def bool_option (arg: Any) -> bool:\n return True"
]
| [
"0.64655954",
"0.6382571",
"0.63540566",
"0.61730707",
"0.6100689",
"0.58971924",
"0.579236",
"0.5725349",
"0.562158",
"0.5614783",
"0.5597293",
"0.5546151",
"0.5543412",
"0.5543412",
"0.55330604",
"0.55128133",
"0.54894316",
"0.54732186",
"0.5436595",
"0.5412067",
"0.54076236",
"0.54025614",
"0.5376029",
"0.5367366",
"0.53583467",
"0.53573734",
"0.5341342",
"0.5336809",
"0.5336809",
"0.53172415"
]
| 0.7209383 | 0 |
Return the string defined in the "split" option, or None. | def get_opt_split(self, command):
if "split" in self.command_dict["commands"][command].keys():
return self.command_dict["commands"][command]["split"]
else:
return CommandDict.DEFAULT_OPT_SPLIT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_or_none(fld):\n if fld is not None:\n split_fld = fld.split(' ')\n else:\n split_fld = None\n\n return split_fld",
"def _parse_option_name(line):\n return line.split('=')[0].strip()",
"def getSplitChars(self):\n return self.getOrDefault(\"splitChars\")",
"def comp(self) -> Optional[str]:\n if self._is_c():\n inst = self._cur()\n if \"=\" in inst:\n if \";\" in inst:\n return inst.split(\"=\")[1].split(\";\")[0]\n else:\n return inst.split(\"=\")[1]\n elif \";\" in inst:\n return inst.split(\";\")[0]\n\n return inst\n return None",
"def split(self):\n return self._clip_metadata.get(\"split\")",
"def default_version_splitter(instring):\n return instring.split()[-1]",
"def soar_splitpart(value, index, split_chars=' - '):\n splits = value.split(split_chars)\n if len(splits) > index:\n return splits[index]\n\n return value",
"def partition(string, delimiter):\r\n sp = string.split(delimiter, 1)\r\n if len(sp) > 1:\r\n return sp[0], sp[1]\r\n else:\r\n return sp[0], \"\"",
"def get_base_split_token(self):\n return Token(self.token.rsplit('/', 1)[0])",
"def getsplitinfo():\n \n splitvarlist = spss.GetSplitVariableNames()\n if len(splitvarlist) == 0:\n return [], None\n else:\n splittype = spssaux.getShow(\"split\", olang=\"english\")\n if splittype.lower().startswith(\"layer\"):\n splittype=\"layered\"\n else:\n splittype=\"separate\"\n return splitvarlist, splittype",
"def builtin_hook_option(self, hook):\n return shlex.split(self.config.get(self.BUILTIN_HOOKS_OPTIONS_SECTION,\n hook, ''))",
"def getSplitFunc(self, splitType):\n if splitType.upper() == \"INFO GAIN\":\n return self.findBestColumnSplitByInfoGain\n elif splitType.upper() == \"GAIN RATIO\":\n return self.findBestColumnSplitByGainRatio\n elif splitType.upper() == \"GINI INDEX\":\n return self.findBestColumnSplitByGini\n return None",
"def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None",
"def get_word():\n return ' '.join(sys.argv[1:])",
"def get_par_silent(self,parname,sep=\".\"):\n try:\n return self.get_par(parname,sep)\n except pyca.pyexc:\n return None",
"def get_commandname(self):\n for line in self.helplines:\n if \"Usage:\" in line and self.parser_type is 'optparse':\n tmp = line.split()\n return tmp[1]\n if \"usage:\" in line and self.parser_type is 'argparse':\n tmp = line.split()\n return tmp[1]\n return None",
"def nodesplit(name):\n parts = name.split(NODENAME_SEP, 1)\n if len(parts) == 1:\n return None, parts[0]\n return parts",
"def subdivision(self) -> Optional[str]:\n return pulumi.get(self, \"subdivision\")",
"def subdivision(self) -> Optional[str]:\n return pulumi.get(self, \"subdivision\")",
"def split_tag(chunk_tag):\n if chunk_tag == 'O':\n return ('O', None)\n return chunk_tag.split('-', maxsplit=1)",
"def get_sequence(self, part: str) -> Optional[str]:\n for char in part:\n if char.upper() not in self.base_nucleotides:\n return None\n return part.upper()",
"def _get_field(self, section, field):\n if not self._configparser.has_option(section, field):\n return None\n return self._configparser.get(section, field).strip()",
"def optSplit(opt, delim, empty = ''):\n\tdef getDelimeterPart(oldResult, prefix):\n\t\ttry:\n\t\t\ttmp = oldResult[0].split(prefix)\n\t\t\tnew = tmp.pop(1)\n\t\t\ttry: # Find position of other delimeters in string\n\t\t\t\totherDelim = min(filter(lambda idx: idx >= 0, map(lambda x: new.find(x), delim)))\n\t\t\t\ttmp[0] += new[otherDelim:]\n\t\t\texcept Exception:\n\t\t\t\totherDelim = None\n\t\t\treturn [str.join(prefix, tmp)] + oldResult[1:] + [new[:otherDelim]]\n\t\texcept Exception:\n\t\t\treturn oldResult + ['']\n\tresult = map(str.strip, reduce(getDelimeterPart, delim, [opt]))\n\treturn tuple(map(lambda x: QM(x == '', empty, x), result))",
"def split_id(self):\n return self._split_id",
"def test_get_separator_unknown():\n # GIVEN a line with commas as delimiter\n line = \"one.two.three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert None is returned\n assert sep is None",
"def get_option(self, option, default=None):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n try:\n value = self.get(section, key)\n value = self._str_to_val(value)\n except ValueError, s:\n logger.warning(\"get failed for {}/{}: {}\".format(section,key,s))\n value = default\n except NoSectionError:\n value = default\n except NoOptionError:\n value = default\n\n return value",
"def split(self, sep=None, maxsplit=None):\n return split(self, sep, maxsplit)",
"def _get_split_instance(artifact_list: List[Artifact], split: Text) -> Artifact:\n matched = [x for x in artifact_list if x.split == split]\n if len(matched) != 1:\n raise ValueError('{} elements matches split {}'.format(len(matched), split))\n return matched[0]",
"def split(self, string, maxsplit=MAX_INT, include_separators=False):\n return self._split(\n string, maxsplit=maxsplit, include_separators=include_separators\n )",
"def getValue(splits, featureName):\n for split in splits:\n if split.startswith(featureName):\n return split[split.find(\"=\")+1:]\n \n return None"
]
| [
"0.59221715",
"0.5859727",
"0.5857961",
"0.57524025",
"0.56366026",
"0.56363374",
"0.5515007",
"0.54885024",
"0.5468819",
"0.5438792",
"0.5391925",
"0.53492457",
"0.53471935",
"0.52982587",
"0.52702796",
"0.5250794",
"0.52309585",
"0.52288836",
"0.52288836",
"0.5199767",
"0.5192752",
"0.51674557",
"0.5164367",
"0.51455754",
"0.51435673",
"0.5139842",
"0.5117441",
"0.5109014",
"0.51074237",
"0.5103761"
]
| 0.6627162 | 0 |
pick actions given numeric agent outputs (np arrays) | def sample_actions(self, agent_outputs):
logits, state_values = agent_outputs
policy = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
return np.array([np.random.choice(len(p), p=p) for p in policy]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actionSelector(self): \n if self.Temp!=0:\n if len(self.lessons) > 60 and self.var_T: \n # if the agent haven't already gotten food since a certain time \n # we increase the temperature by 0.001 \n if self.count_without_food>12:\n self.Temp += 0.01 \n if self.Temp>=(self.var_T[0]): \n self.Temp = self.var_T[0] \n # otherwise we decrease the temperatur by 0.001 \n else: \n self.Temp -= 0.001\n if self.Temp <= (self.var_T[-1]):\n self.Temp = self.var_T[-1]\n \n s = np.sum([np.exp(float(k)/self.Temp) for k in self.U_list])\n\n self.action_proba =[np.exp(float(m)/self.Temp)/s for m in self.U_list]\n action = np.random.choice(np.arange(4),p=self.action_proba) # choice a random choice relating to the probability distribution given by the softmax algorith \n else:\n action = np.argmax(self.U_list)\n return action",
"def output_to_action(output):\n action_index = argmax(output)\n action_map = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.2]]\n result = np.array(action_map[action_index])\n return result",
"def target_act(self, obs, noise=0.0):\n #return target_actions\n target_actions = torch.zeros(obs.shape[:2] + (self.action_size,), dtype=torch.float, device=device)\n for i in range(self.num_agents):\n target_actions[:, i, :] = self.maddpg_agent[i].target_act(obs[:, i])\n \n return target_actions",
"def choose_action(self, features_all_arms) -> Tuple[torch.Tensor, torch.Tensor]:\n actor_output = self.policy.act(obs=features_all_arms)\n chosen_action = torch.argmax(actor_output.action, dim=1)\n log_prob = actor_output.log_prob\n return torch.unsqueeze(chosen_action, 1), log_prob",
"def action_callback(agent, self):\n obs = self.obs_callback(agent, self)\n action = self.s_agents.select_action(torch.Tensor([obs]), action_noise=True, param_noise=None).squeeze().numpy()\n return _get_action(action, agent, self)",
"def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return np.array(actions)",
"def step(self, actions):\r\n # Run actions\r\n actions = [np.argmax((action_scores+.0001) * mask) for action_scores, mask in zip(actions, self.get_avail_actions())]\r\n reward, terminated, info = self.env.step(actions)\r\n\r\n # Get updated state\r\n self.state = self.env.get_state()\r\n\r\n # Return arrays for each agent\r\n reward_n = [reward / self.n for _ in range(self.n)]\r\n terminated_n = [terminated for _ in range(self.n)]\r\n info_n = [info for _ in range(self.n)]\r\n observation_n = self.env.get_obs()\r\n\r\n return observation_n, reward_n, terminated_n, info_n",
"def __call__(self, num_actions):\n return np.random.choice(num_actions)",
"def select_action(images, n_actions, device, eps_threshold=-1):\n actions = []\n\n for i in images:\n if eps_threshold == -1:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n else:\n sample = random.random()\n if sample > eps_threshold:\n with torch.no_grad():\n # t.min(1) will return smallest column value of each row.\n # second column on min result is index of where min element was\n # found, so we pick action with the lower expected reward.\n actions.append(policy_net(i.unsqueeze(0)).min(1)[1].view(1, 1))\n else:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n\n return torch.tensor(actions, device=device)",
"def actions(self):\n return np.array([m['actions'] for m in self.model_outs], dtype=np.int32)",
"def choose_action(self, state):\n pure_action = self.actor_local.model.predict(state)[0]\n # add gaussian noise for exploration\n # noise = np.random.normal(self.noise_mean, self.noise_stddev, self.action_size)\n \n # add OU noise for exploration\n noise = self.noise.sample()\n\n # action = np.clip(pure_action + noise, self.action_low, self.action_high)\n # print(\"pure\", pure_action)\n # print(\"noise\", noise)\n # action = self.action_high * (pure_action + noise)\n # action = pure_action + noise\n action = np.clip(pure_action + noise, self.action_low, self.action_high)\n # print(\"action\", action)\n return action.tolist()",
"def choose_action(self, state, network, features=(), noise=True):\r\n state = np.concatenate(state)\r\n state = np.reshape(state, (-1, self.feature_number))\r\n if network == 'main':\r\n action = self.actor.predict(state)\r\n elif network == 'target':\r\n action = self.target_actor.predict(state)\r\n if noise:\r\n \"\"\"Ornstein-Uhlenbeck Process\"\"\"\r\n noisy_action = self.add_noise(action, features)\r\n if noisy_action > np.amax(self.action_range):\r\n noisy_action = np.amax(self.action_range)\r\n elif noisy_action < np.amin(self.action_range):\r\n noisy_action = np.amin(self.action_range)\r\n return noisy_action, action\r\n else:\r\n return action",
"def choose_random_action(all_actions: int) -> int:\n return np.random.randint(all_actions)",
"def _send(self, action: List[np.ndarray]) -> None:",
"def gen_action(self, agent_list, observation, frame_idx, train, free_map=None):\n #TODO add multiple agent functionality with a for loop \n \n if train == True:\n epsilon = self.epsilon_by_frame(frame_idx)\n if random.random() > epsilon:\n state = observation\n state = torch.FloatTensor(np.float32(state))\n state = state.to(self.device).unsqueeze(0).unsqueeze(0)\n q_value = self.current_model.forward(state)\n max_q, action = q_value[0].max(0)\n max_q = float(max_q)\n action = int(action)\n \n else:\n action = random.randrange(self.num_actions)\n \n # for evaluation\n elif train == False:\n #TODO fix the CNN input dimensions\n state = observation.flatten()\n state = torch.FloatTensor(np.float32(state))\n state = state.to(self.device)\n \n q_value = self.current_model.forward(state)\n max_q, action = q_value.max(0)\n\n #TODO get all agent actions for one team here\n action_out = []\n action_out.append(action)\n return action_out",
"def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions",
"def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions",
"def get_random_action(self,num_agents=1, action_size=4):\n actions = np.random.randn(num_agents, action_size)\n return np.clip(actions, -1, 1).squeeze(0)",
"def step(self, action):\n self.steps += 1\n in_var = self.state[:4]\n\n # Increase or decrease the 4 input values\n new_var = in_var+ action \n\n #If the agent tries to exceed the range of the mins & maxes, this sets them to the max. \n for i,temp_i in enumerate(new_var):\n if (temp_i <= self.mins[i]):\n new_var[i] = self.mins[i]\n elif (temp_i >= self.maxes[i]): \n new_var[i] = self.maxes[i]\n\n in_var = new_var\n\n # Get all the new outputs:\n self.ins = in_var\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n #check that this is a viable output; if not, reject the action\n #is this temp change viable?\n \n MSE1 = (self.goals[0]-out_flow)**2\n MSE2 = (self.goals[1]-out_frac)**2\n MSE3 = (self.goals[2]-out_temp)**2\n\n MSE = MSE1 + MSE2 + MSE3\n\n # Update your state:\n state_new = np.append(self.ins,[out_flow,out_frac,out_temp] )\n self.state =np.append(state_new,self.goals)\n\n done = ((MSE1 <= self.MSE_thresh1) & (MSE2 <= self.MSE_thresh2) & (MSE3 <= self.MSE_thresh3))\n done = bool(done)\n\n # Get the corresponding reward:\n reward = 0\n if done:\n reward += self.rew_goal\n else: \n reward -= MSE *cfg['MSE_scale']\n\n self.reward = reward\n self.tot_rew += reward\n self.done = done\n\n return (self.state, reward, done, {'MSE thresh': self.MSE_thresh1})",
"def learning_Utility(self):\n # Shape the input that we give to the neural network with the value of sensors, the previous actions the life of the agent \n # Get the results from the sensors according the different movement executed by the agent \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[self.agent.get_previous_collision()]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [self.agent.get_previous_collision()]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [self.agent.get_previous_collision()]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [self.agent.get_previous_collision()]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n self.input_list = [input_nn_E.reshape(1,145),\n input_nn_S.reshape(1,145),\n input_nn_O.reshape(1,145),\n input_nn_N.reshape(1,145)]\n self.U_list = [self.nn.predict(i) for i in self.input_list ] #The utility according the different acts performed \n return self.actionSelector() #Select the action acording a propbabilitics distribution given in the paper",
"def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions",
"def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions",
"def try_step_multi (self, actions):\n legal_action_indices = []\n legal_action_features = []\n\n for i, action in enumerate(actions):\n object_index, new_location, action_means, action_stds = action\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if self.e.act(object_index, Command(position, rotation)):\n inputs = self.get_feature_only()\n\n legal_action_indices.append(i)\n legal_action_features.append(inputs)\n\n # Back transform\n self.e.act(object_index, \n self.command_from_transform(prev_transform), check_condition = False )\n\n\n if len(legal_action_indices) == 0:\n return ([], [])\n\n legal_action_feats = np.stack(legal_action_features)\n\n action_size = legal_action_feats.shape[0]\n num_steps = legal_action_feats.shape[1]\n feature_size = legal_action_feats.shape[2]\n epoch_size = int(np.ceil(action_size / self.config.batch_size))\n\n zero_filled = np.zeros( ( epoch_size * self.config.batch_size, num_steps, feature_size ))\n\n zero_filled[:legal_action_feats.shape[0]] = legal_action_feats\n\n zero_filled = zero_filled.reshape((epoch_size, self.config.batch_size, num_steps, feature_size))\n\n all_progress = []\n for i in range(epoch_size):\n progress = self.progress_estimator.predict(zero_filled[i], sess = self.session)\n all_progress.append(progress)\n\n all_progress = np.concatenate(all_progress)\n \n\n return (legal_action_indices, all_progress[:action_size])",
"def choose_action(self, agent_data):\r\n action_value_estimates = agent_data[\"action_value_estimates\"]\r\n action_counts = agent_data[\"action_counts\"]\r\n time_step = np.sum(action_counts)\r\n ucb_value_estimates = np.zeros(len(action_counts))\r\n for i in np.arange(len(action_counts)):\r\n if action_counts[i]!=0:\r\n ucb_value_estimates[i] = action_value_estimates[i] + self.C * np.sqrt(np.log(time_step) / action_counts[i])\r\n else:\r\n ucb_value_estimates[i] = sys.float_info.max\r\n action = self.argmax_with_random_tiebreaker(ucb_value_estimates)\r\n return action",
"def get_random_action():\n # Define an array containing the available actions for the UAV\n # in the final work, takeoff and land must be added\n action_list = ['left', 'right', 'forward', 'backward', 'stop', 'descend']\n # Choose a random action within the array\n #action_index = STDrandom.randint(0, len(action_list) - 1)\n # forward,backward,left,right, stop and land\n probability_descend = 0.25\n probability = (1 - probability_descend)/ (len(action_list) -1)\n action_probability = [probability, probability, probability, probability, probability, probability_descend]\n action = np.random.choice(action_list, 1, p=action_probability)[0]\n #action_index = STDrandom.randint(0, 10)\n #action = action_list[action_index]\n\n return action",
"def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)",
"def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)",
"def get_neighbor_action(self, action):\n naction = []\n for i in range(self.n_agent):\n naction.append(action[self.neighbor_mask[i] == 1])\n return naction",
"def act(self, states, add_noise=True):\n \n actions = []\n for agent, state in zip(self.agents, states):\n action = agent.act(state, noise_weight=self.noise_weight, add_noise=self.enable_noise)\n actions.append(action)\n self.noise_weight *= self.noise_decay\n return np.array(actions).reshape(1, -1) # flatten",
"def process_actions(self, n_steps, actions):\n # Each row of actions is one time step,\n # row contains action indices for all agents\n # Convert to [time, agents, l_action]\n # so each agent gets its own 1-hot row vector\n actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)\n grid = np.indices((n_steps, self.n_agents))\n actions_1hot[grid[0], grid[1], actions] = 1\n # Convert to format [time*agents, agents-1, l_action]\n # so that the set of <n_agent> actions at each time step\n # is duplicated <n_agent> times, and each duplicate\n # now contains all <n_agent>-1 actions representing\n # the OTHER agents actions\n list_to_interleave = []\n for n in range(self.n_agents):\n # extract all actions except agent n's action\n list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )\n # interleave\n actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])\n for n in range(self.n_agents):\n actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]\n # In-place reshape of actions to [time*n_agents, l_action]\n actions_1hot.shape = (n_steps*self.n_agents, self.l_action)\n\n return actions_1hot, actions_others_1hot"
]
| [
"0.6332541",
"0.62858754",
"0.62273467",
"0.62202567",
"0.62171763",
"0.6207081",
"0.61901003",
"0.6172455",
"0.61401296",
"0.61305875",
"0.6094308",
"0.6051624",
"0.6049658",
"0.60261565",
"0.6015744",
"0.60044897",
"0.60044897",
"0.596948",
"0.5955129",
"0.595375",
"0.5917385",
"0.5917385",
"0.5912095",
"0.5905839",
"0.5903797",
"0.58740115",
"0.58740115",
"0.5862452",
"0.5853225",
"0.58517104"
]
| 0.67525196 | 0 |
Returns a dictionary of files and folders for a given reference and path. Implemented using ``git ls-tree``. If an invalid reference and/or path None is returned. | def ls_tree(reference, path=None, directory=None):
# Try to track the reference as a branch
track_branches(reference, directory=directory)
cmd = 'git ls-tree ' + reference
if path is not None and path != '':
cmd += ':' + path
retcode, out, err = execute_command(cmd, autofail=False, silent_error=True,
cwd=directory, return_io=True)
if retcode != 0:
return None
items = {}
for line in out.splitlines():
tokens = line.split()
if len(tokens) != 4:
return None
if tokens[1] not in ['blob', 'tree']:
raise RuntimeError("item not a blob or tree")
if tokens[3] in items:
raise RuntimeError("duplicate name in ls tree")
items[tokens[3]] = 'file' if tokens[1] == 'blob' else 'directory'
return items | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_tree_hash_dict(cls, current_dir, file_path, dirs, files, ref_table):\n\n # we sort just to ensure there are no arrangement issues that could affect the hash outcome\n file_hashs = sorted([ref_table['%s/%s' % (file_path, file)]['hash'] for file in files])\n dir_hashs = sorted([ref_table['%s/%s' % (file_path, dir_name)]['hash'] for dir_name in dirs])\n\n tree_info = {}\n tree_info['path'] = file_path\n tree_info['content'], tree_info['hash'] = cls.get_tree_contents(file_path, dirs, files, ref_table)\n tree_info['type'] = 'tree'\n tree_info['name'] = current_dir\n tree_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return tree_info",
"def get_tree_contents(cls, tree_path, dirs, files, ref_table):\n\n tree_contents = ''\n\n for dir_name in dirs:\n dir_name_path = '%s/%s' % (tree_path, dir_name)\n dir_name_perm = ref_table[dir_name_path]['perm']\n tree_contents += '%s tree %s %s \\n' % (dir_name_perm, ref_table[dir_name_path]['hash'], dir_name)\n\n for file in files:\n file_path = '%s/%s' % (tree_path, file)\n file_perm = ref_table[file_path]['perm']\n tree_contents += '%s file %s %s \\n' % (file_perm, ref_table[file_path]['hash'], file)\n\n return tree_contents, cls.get_256_hash_from_string(tree_contents)",
"def show(reference, path, directory=None):\n # Check to see if this is a directory\n dirs = ls_tree(reference, path, directory)\n if dirs is not None:\n return dirs\n # Otherwise a file or does not exist, check for the file\n cmd = 'git show {0}:{1}'.format(reference, path)\n # Check to see if it is a directory\n retcode, out, err = execute_command(cmd, autofail=False, silent_error=True,\n cwd=directory, return_io=True)\n if retcode != 0:\n # Does not exist\n return None\n # It is a file that exists, return the output\n return out",
"def _get_refpaths(data_dict, reference_file_types, observatory):\n if not reference_file_types: # [] interpreted as *all types*.\n return {}\n with crds_cache_locking.get_cache_lock():\n bestrefs = crds.getreferences(\n data_dict, reftypes=reference_file_types, observatory=observatory)\n refpaths = {filetype: filepath if \"N/A\" not in filepath.upper() else \"N/A\"\n for (filetype, filepath) in bestrefs.items()}\n return refpaths",
"def resolve_ref_hierarchy(self, path):\n\n project, ref, refPrefix = self.resolve_partial_ref_prefix(path)\n if not ref:\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REF_LEVEL,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref, 'refPrefix': refPrefix}\n )",
"def _get_references_data(wit_path):\n\n with open(os.path.join(wit_path, '.wit', 'references.txt'), 'r') as data:\n info = {'None': 'None'}\n info.update({'HEAD': data.readline().split('=')[-1].strip('\\n')})\n info.update({'master': data.readline().split('=')[-1].strip('\\n')})\n for row in data.readlines():\n name, commit_id = row.split('=')\n info.update({name.strip('\\n'): commit_id.strip('\\n')})\n\n return info",
"def resolve_references(path, schema):\n if isinstance(schema, dict):\n # do $ref first\n if '$ref' in schema:\n # Pull the referenced filepath from the schema\n referenced_file = schema['$ref']\n\n # Referenced filepaths are relative, so take the current path's\n # directory and append the relative, referenced path to it.\n inner_path = os.path.join(os.path.dirname(path), referenced_file)\n\n # Then convert the path (which may contiain '../') into a\n # normalised, absolute path\n inner_path = os.path.abspath(inner_path)\n\n # Load the referenced file\n ref = load_file(\"file://\" + inner_path)\n\n # Check that the references in *this* file are valid\n result = resolve_references(inner_path, ref)\n\n # They were valid, and so were the sub-references. Delete\n # the reference here to ensure we don't pass over it again\n # when checking other files\n del schema['$ref']\n else:\n result = {}\n\n for key, value in schema.items():\n result[key] = resolve_references(path, value)\n return result\n elif isinstance(schema, list):\n return [resolve_references(path, value) for value in schema]\n else:\n return schema",
"def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs",
"async def get_tree(repository, ref):\n try:\n tree = await repository.get_tree(ref)\n return tree\n except AIOGitHubException as exception:\n raise HacsException(exception)",
"def get(self):\n for path, dirs, files in os.walk(self.directory):\n folders = path[self.start:].split(os.sep)\n if self.branches:\n if self._filter(folders, 'folders'):\n files = dict.fromkeys(files)\n parent = reduce(dict.get, folders[:-1], self.tree_dict)\n parent[folders[-1]] = files\n else:\n files = dict.fromkeys(files)\n parent = reduce(dict.get, folders[:-1], self.tree_dict)\n parent[folders[-1]] = files\n return self.tree_dict",
"async def get_tree(\n self, ref: str or None = None\n ) -> [\"AIOGitHubAPIRepositoryTreeContent\"] or list:\n if ref is None:\n raise AIOGitHubAPIException(\"Missing ref\")\n _endpoint = f\"/repos/{self.full_name}/git/trees/{ref}\"\n _params = {\"recursive\": \"1\"}\n\n response = await self.client.get(endpoint=_endpoint, params=_params)\n\n return [\n AIOGitHubAPIRepositoryTreeContent(x, self.full_name, ref)\n for x in response.get(\"tree\", [])\n ]",
"def _git_show(self, path, ref=\"HEAD\"):\n res = requests.get(\n \"/\".join([self.loc, ref, path]),\n auth=HTTPBasicAuth(self.username, self.password)\n )\n\n if res.status_code // 100 != 2:\n return None\n\n if res.headers['Content-Type'] == 'application/json':\n res = json.loads(res.content)\n # cache existence info about all directories shown!\n if path != \"talus/pypi/simple\" and res[\"type\"] == \"listing\":\n self._add_to_cache(path, items=res[\"items\"])\n else:\n res = res.content\n\n return res",
"def _group_references_by_file(self, references: List[ReferenceDict]\n ) -> Dict[str, List[Tuple[Point, str]]]:\n grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]\n for reference in references:\n file_path = uri_to_filename(reference[\"uri\"])\n point = Point.from_lsp(reference['range']['start'])\n\n # get line of the reference, to showcase its use\n reference_line = get_line(self.view.window(), file_path, point.row)\n\n if grouped_references.get(file_path) is None:\n grouped_references[file_path] = []\n grouped_references[file_path].append((point, reference_line))\n\n # we don't want to cache the line, we always want to get fresh data\n linecache.clearcache()\n\n return grouped_references",
"def get_entry_properties(self, project, ref, path):\n\n parentDir = os.path.dirname(path)\n targetEntry = os.path.basename(path)\n\n for entry in self.cache.get_repository_tree(project, ref, parentDir):\n if entry['name'] == targetEntry:\n return entry",
"def read_symbolic_ref(self, path: str) -> Optional[Tuple[str, str]]:\n path = posixpath.join(self._path, path)\n self._trace(\"fetching symbolic ref: %s\" % path)\n try:\n meta, resp = self._connection.files_download(path)\n ref = resp.content.decode(\"utf8\")\n ref = ref[len(\"ref: \") :].rstrip()\n rev = meta.rev\n return (rev, ref)\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.DownloadError):\n raise\n return None",
"def tracked_files(self, branch):\n d = {}\n ls_tree = self.git_cmd('ls-tree -r --name-only --full-tree %s' %\n branch)\n for rpath in ls_tree.splitlines():\n if rpath == '.gitignore':\n continue\n if branch.startswith('timestamps'):\n d[rpath] = pathlib.PosixPath(self.repodir, rpath)\n else:\n if not rpath.startswith(ROOT_SUBDIR):\n continue\n d[rpath] = EtcPath(self.repodir, rpath)\n return d",
"def get_repo_info(path: Union[str, Path]):\n def find_repo(findpath):\n p = Path(findpath).absolute()\n for p in [p, *p.parents]:\n try:\n repo = Repo(p)\n break\n except InvalidGitRepositoryError:\n pass\n else:\n raise InvalidGitRepositoryError\n return repo\n repo = find_repo(path)\n return {\"hash\": repo.head.commit.hexsha,\n \"gitdir\": repo.git_dir,\n \"active_branch\": repo.active_branch.name}",
"def get_reference(self, compare_path):\n if compare_path:\n with open(compare_path, 'r') as f:\n reference = json.load(f)\n return reference[\"boxes\"]\n return None",
"def fileInfo(*args, referenceNode: Union[AnyStr, bool]=\"\", remove: Union[AnyStr, bool]=\"\",\n q=True, query=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def tree_lookup(self, target_path, commit):\n segments = target_path.split(\"/\")\n tree_or_blob = commit.tree\n path = ''\n while segments:\n dirent = segments.pop(0)\n if isinstance(tree_or_blob, pygit2.Tree):\n if dirent in tree_or_blob:\n tree_or_blob = self.repo[tree_or_blob[dirent].oid]\n # self.logger.debug('%s in %s' % (dirent, path))\n if path:\n path += '/'\n path += dirent\n else:\n # This is probably because we were called on a\n # commit whose parent added a new directory.\n self.logger.debug(' %s not in %s in %s' %\n (dirent, path, commit.hex[:8]))\n return None\n else:\n self.logger.debug(' %s not a tree in %s' %\n (tree_or_blob, commit.hex[:8]))\n return None\n return tree_or_blob",
"def get_value_references(self, input_path):\n input_value = self.get_value(input_path)\n\n if isinstance(input_value, ProtocolPath):\n return {input_path: input_value}\n\n if (not isinstance(input_value, list) and\n not isinstance(input_value, tuple) and\n not isinstance(input_value, dict)):\n\n return {}\n\n property_name, protocols_ids = ProtocolPath.to_components(input_path.full_path)\n\n return_paths = {}\n\n if isinstance(input_value, list) or isinstance(input_value, tuple):\n\n for index, list_value in enumerate(input_value):\n\n if not isinstance(list_value, ProtocolPath):\n continue\n\n path_index = ProtocolPath(property_name + '[{}]'.format(index), *protocols_ids)\n return_paths[path_index] = list_value\n\n else:\n\n for dict_key in input_value:\n\n if not isinstance(input_value[dict_key], ProtocolPath):\n continue\n\n path_index = ProtocolPath(property_name + '[{}]'.format(dict_key), *protocols_ids)\n return_paths[path_index] = input_value[dict_key]\n\n return return_paths",
"def resolve_ref(self, path):\n\n project, ref, remainingPath = self.resolve_ref_prefix(path)\n if not ref or remainingPath.as_posix() != '.':\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REPOSITORY_DIR,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref}\n )",
"def get_refs(self, for_push: bool) -> List[Tuple[str, str]]:\n try:\n loc = posixpath.join(self._path, \"refs\")\n res = self._connection.files_list_folder(loc, recursive=True)\n files = res.entries\n while res.has_more:\n res = self._connection.files_list_folder_continue(res.cursor)\n files.extend(res.entries)\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.ListFolderError):\n raise\n if not for_push:\n # if we're pushing, it's okay if nothing exists beforehand,\n # but it's good to notify the user just in case\n self._trace(\"repository is empty\", Level.INFO)\n else:\n self._first_push = True\n return []\n files = [i for i in files if isinstance(i, dropbox.files.FileMetadata)]\n paths = [i.path_lower for i in files]\n if not paths:\n return []\n revs: List[str] = []\n data: List[bytes] = []\n for rev, datum in self._get_files(paths):\n revs.append(rev)\n data.append(datum)\n refs = []\n for path, rev, datum in zip(paths, revs, data):\n name = self._ref_name_from_path(path)\n sha = datum.decode(\"utf8\").strip()\n self._refs[name] = (rev, sha)\n refs.append((sha, name))\n return refs",
"def build_tree(path: str, ignore_dirs: Optional[Sequence[str]] = None) -> dict:\n if ignore_dirs is None:\n ignore_dirs = []\n if is_module(path):\n key = uuid.uuid4().hex\n name = os.path.splitext(os.path.basename(path))[0]\n item = {key: {\n \"name\": name,\n \"path\": os.path.abspath(path),\n \"components\": [name],\n \"type\": \"module\",\n }}\n return item\n if is_shared_object(path):\n key = uuid.uuid4().hex\n name = os.path.basename(path).partition(\".\")[0]\n return {key: {\n \"name\": name,\n \"path\": os.path.abspath(path),\n \"components\": [name],\n \"type\": \"shared_object\"\n }}\n if is_file(path):\n key = uuid.uuid4().hex\n return {key: {\n \"name\": None,\n \"path\": os.path.abspath(path),\n \"components\": [None],\n \"type\": \"file\"\n }}\n if is_directory(path):\n key = uuid.uuid4().hex\n name = os.path.basename(path)\n item = {key: {\n \"name\": name if is_package(path) else None,\n \"path\": os.path.abspath(path),\n \"components\": [name] if is_package(path) else [None],\n \"type\": \"package\" if is_package(path) else \"directory\",\n \"children\": {}\n }}\n for child in os.listdir(path):\n if child not in ignore_dirs:\n child_path = os.path.join(path, child)\n info = build_tree(child_path, ignore_dirs)\n if info:\n if \"children\" in item[key]:\n apply_tree(info, lambda x: x[\"components\"].insert(0, item[key][\"name\"]))\n item[key][\"children\"].update(info)\n return item\n return {}",
"def _resolve_entry(self, path):\n upath = pycompat.fsdecode(path)\n ent = None\n if path in self._pending_changes:\n val = self._pending_changes[path]\n if val is None:\n raise KeyError\n return val\n t = self._tree\n comps = upath.split('/')\n te = self._tree\n for comp in comps[:-1]:\n te = te[comp]\n t = self._git_repo[te.id]\n ent = t[comps[-1]]\n if ent.filemode == pygit2.GIT_FILEMODE_BLOB:\n flags = b''\n elif ent.filemode == pygit2.GIT_FILEMODE_BLOB_EXECUTABLE:\n flags = b'x'\n elif ent.filemode == pygit2.GIT_FILEMODE_LINK:\n flags = b'l'\n else:\n raise ValueError('unsupported mode %s' % oct(ent.filemode))\n return ent.id.raw, flags",
"def list_project_ref_hierarchy(self, entity):\n\n refs = []\n\n for ref in self.cache.list_project_refs(entity.objects['project'], self.tagRefs):\n if ref.name.startswith(entity.objects['refPrefix']):\n remainingRefName = pathlib.Path(ref.name).relative_to(pathlib.Path(entity.objects['refPrefix'])).parts[0]\n refs.append(remainingRefName)\n\n return refs",
"def repo_fs():\n for root, dirs, files in os.walk(\".\"):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n\n for f in files:\n if f.endswith(\".py\"):\n if not f.startswith('__'):\n ALL_PY_FILES.append(os.path.join(root, f))\n PY_FILES.append(os.path.join(root, f))\n if f.endswith(\".yml\"):\n YML_FILES.append(os.path.join(root, f))\n if f.startswith(\"requirements\"):\n PIP_FILES.append(os.path.join(root, f))\n if f.startswith(\"development\"):\n DEV_FILES.append(os.path.join(root, f))\n if f.startswith(\"README.md\"):\n README_FILES.append(os.path.join(root, f))\n if f.startswith(\"LICENSE\"):\n LICENSE.append(os.path.join(root, f))\n if f.startswith(\"CONTRIBUTIONS\"):\n CONTRIBUTIONS.append(os.path.join(root, f))\n\n if PY_FILES:\n parse_files()\n\n return { # dictionary with all lists of file path/names #\n 'PY_FILES': PY_FILES,\n 'YML_FILES': YML_FILES,\n 'PIP_FILES': PIP_FILES,\n 'README_FILES': README_FILES,\n 'TEST_FILES': TEST_FILES,\n 'LICENSE': LICENSE,\n 'URL_FILES': URL_FILES,\n 'CONTRIBUTIONS': CONTRIBUTIONS,\n 'SETUP_FILES': SETUP_FILES,\n 'MODEL_FILES': MODEL_FILES,\n 'SETTINGS_FILES': SETTINGS_FILES,\n 'DEV_FILES': DEV_FILES,\n }",
"def __get_graph(self, path):\n tree_n = {}\n tree_dir = {}\n expanded = None\n while path:\n if path == os.sep and tree_dir.has_key(os.sep):\n break\n tree_dir[path] = (self.get_dirs(path), expanded)\n expanded = os.path.basename(path)\n path = os.path.dirname(path)\n dir_keys = tree_dir.keys()\n dir_keys.sort()\n n = 0\n for d in dir_keys:\n tree_n[n] = d\n n += 1\n return tree_n, tree_dir",
"def traverse(self, path):\n\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n # print(path)\n # print('files:', self.files)\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n print('info', directory, path_list[index:])\n return directory, path_list[index:]",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}"
]
| [
"0.61584675",
"0.5878132",
"0.58635056",
"0.5799189",
"0.57655996",
"0.57045203",
"0.5640728",
"0.55679065",
"0.55450636",
"0.5512755",
"0.5477827",
"0.54668874",
"0.54139876",
"0.54109913",
"0.52307737",
"0.5205111",
"0.5195961",
"0.51922023",
"0.5183229",
"0.5154966",
"0.5145712",
"0.5123847",
"0.5122899",
"0.50898236",
"0.5086673",
"0.50673145",
"0.50500834",
"0.5047822",
"0.5026681",
"0.5009415"
]
| 0.7030277 | 0 |
Interface to the git show command. If path is a file that exists, a string will be returned which is the contents of that file. If the path is a directory that exists, then a dictionary is returned where the keys are items in the folder and the value is either the string 'file' or 'directory'. If the path does not exist then this returns None. | def show(reference, path, directory=None):
# Check to see if this is a directory
dirs = ls_tree(reference, path, directory)
if dirs is not None:
return dirs
# Otherwise a file or does not exist, check for the file
cmd = 'git show {0}:{1}'.format(reference, path)
# Check to see if it is a directory
retcode, out, err = execute_command(cmd, autofail=False, silent_error=True,
cwd=directory, return_io=True)
if retcode != 0:
# Does not exist
return None
# It is a file that exists, return the output
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _git_show(self, path, ref=\"HEAD\"):\n res = requests.get(\n \"/\".join([self.loc, ref, path]),\n auth=HTTPBasicAuth(self.username, self.password)\n )\n\n if res.status_code // 100 != 2:\n return None\n\n if res.headers['Content-Type'] == 'application/json':\n res = json.loads(res.content)\n # cache existence info about all directories shown!\n if path != \"talus/pypi/simple\" and res[\"type\"] == \"listing\":\n self._add_to_cache(path, items=res[\"items\"])\n else:\n res = res.content\n\n return res",
"def git_show(ref, filepath, **kw):\n if ref == DIRTY and filepath == 'qwerty.sh':\n return sh('cat', os.environ.get('QWERTY_SH', filepath), **kw)\n return sh('git', 'show', '{ref}:{filepath}'.format(**locals()), **kw)",
"def show(file):\n rino.git_tools.show(file)",
"def GetFileContent(self, file_hash, is_binary):\r\n data, retcode = RunShellWithReturnCode([\"git\", \"show\", file_hash],\r\n universal_newlines=not is_binary)\r\n if retcode:\r\n ErrorExit(\"Got error status from 'git show %s'\" % file_hash)\r\n return data",
"def GetFileContent(self, file_hash, is_binary):\n data, retcode = RunShellWithReturnCode([\"git\", \"show\", file_hash],\n universal_newlines=not is_binary)\n if retcode:\n ErrorExit(\"Got error status from 'git show %s'\" % file_hash)\n return data",
"def info(directory):\n git_dir = os.path.join(directory, '.git')\n if not os.path.exists(git_dir):\n raise IOError(errno.ENOENT, '.git not found', directory)\n\n if os.path.isfile(git_dir):\n # submodules\n with open(git_dir, 'r') as f:\n git_ref = f.read().strip()\n\n if not git_ref.startswith('gitdir: '):\n raise IOError(errno.EINVAL, 'Unexpected .git contents', git_dir)\n git_ref = git_ref[8:]\n if git_ref[0] != '/':\n git_ref = os.path.abspath(os.path.join(directory, git_ref))\n git_dir = git_ref\n\n head_file = os.path.join(git_dir, 'HEAD')\n with open(head_file, 'r') as f:\n head = f.read().strip()\n if head.startswith('ref: '):\n head = head[5:]\n\n if head.startswith('refs/heads/'):\n branch = head[11:]\n elif head.startswith('refs/tags/'):\n branch = head[10:]\n else:\n branch = head\n\n head_sha1 = get_disclosable_head(directory, branch)\n if head_sha1:\n commit_date = subprocess.check_output(\n ('/usr/bin/git', 'show', '-s', '--format=%ct', head_sha1),\n cwd=git_dir).strip()\n else:\n commit_date = ''\n\n # Requires git v1.7.5+\n try:\n remote_url = subprocess.check_output(\n ('/usr/bin/git', 'ls-remote', '--get-url'),\n cwd=git_dir).strip()\n except subprocess.CalledProcessError:\n remote_url = ''\n utils.get_logger().info(\"Unable to find remote URL for %s\", git_dir)\n\n return {\n '@directory': directory,\n 'head': head,\n 'headSHA1': head_sha1,\n 'headCommitDate': commit_date,\n 'branch': branch,\n 'remoteURL': remote_url,\n }",
"def one_line_git_summary(path):\n return _run_command(path, 'git show --oneline -s')",
"def get_repo_info(path: Union[str, Path]):\n def find_repo(findpath):\n p = Path(findpath).absolute()\n for p in [p, *p.parents]:\n try:\n repo = Repo(p)\n break\n except InvalidGitRepositoryError:\n pass\n else:\n raise InvalidGitRepositoryError\n return repo\n repo = find_repo(path)\n return {\"hash\": repo.head.commit.hexsha,\n \"gitdir\": repo.git_dir,\n \"active_branch\": repo.active_branch.name}",
"def lsinfo(path):",
"def describe(dir):\n try:\n # decode() is needed here for Python3 compatibility. In Python2,\n # str and bytes are the same type, but not in Python3.\n # Popen.communicate() returns a bytes instance, which needs to be\n # decoded into text data first in Python3. And this decode() won't\n # hurt Python2.\n return command_output(['git', 'describe'], dir).rstrip().decode()\n except:\n try:\n return command_output(\n ['git', 'rev-parse', 'HEAD'], dir).rstrip().decode()\n except:\n return 'unknown hash, ' + datetime.date.today().isoformat()",
"def describe(location):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git describe --always'\n return subprocess.check_output(cmd, shell=True).strip()",
"def do_show(cs, args):\n repo = args.repository\n tag_index = repo.find(':')\n if tag_index != -1:\n tag = repo[tag_index + 1:]\n repo = repo[:tag_index]\n else:\n tag = \"latest\"\n if repo.find('/') == -1:\n repo = \"library/\" + repo\n _, data = cs.repositories.get_manifests(repo, tag)\n utils.print_dict(data)",
"def print_info_for_path(path):\n import json\n import sys\n json.dump(gather_info_for_path(path), sys.stdout)",
"def repository_show(ctx: click.Context, repository_name):\n subcommand_repository.cmd_show(ctx.obj, repository_name)",
"def show_top_level(self, current_path):\n p = Popen(\n [\"git\", \"rev-parse\", \"--show-toplevel\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = {\n \"code\": p.returncode,\n \"top_repo_path\": my_output.decode(\"utf-8\").strip(\"\\n\"),\n }\n return result\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git rev-parse --show-toplevel\",\n \"message\": my_error.decode(\"utf-8\"),\n }",
"def info(self, usecache=1):\r\n info = usecache and cache.info.get(self)\r\n if not info:\r\n try:\r\n output = self._svn('info')\r\n except py.process.cmdexec.Error, e:\r\n if e.err.find('Path is not a working copy directory') != -1:\r\n raise py.error.ENOENT(self, e.err)\r\n elif e.err.find(\"is not under version control\") != -1:\r\n raise py.error.ENOENT(self, e.err)\r\n raise\r\n # XXX SVN 1.3 has output on stderr instead of stdout (while it does\r\n # return 0!), so a bit nasty, but we assume no output is output\r\n # to stderr...\r\n if (output.strip() == '' or \r\n output.lower().find('not a versioned resource') != -1):\r\n raise py.error.ENOENT(self, output)\r\n info = InfoSvnWCCommand(output)\r\n\r\n # Can't reliably compare on Windows without access to win32api\r\n if py.std.sys.platform != 'win32': \r\n if info.path != self.localpath: \r\n raise py.error.ENOENT(self, \"not a versioned resource:\" + \r\n \" %s != %s\" % (info.path, self.localpath)) \r\n cache.info[self] = info\r\n self.rev = info.rev\r\n return info",
"def show_prefix(self, current_path):\n p = Popen(\n [\"git\", \"rev-parse\", \"--show-prefix\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = {\n \"code\": p.returncode,\n \"under_repo_path\": my_output.decode(\"utf-8\").strip(\"\\n\"),\n }\n return result\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git rev-parse --show-prefix\",\n \"message\": my_error.decode(\"utf-8\"),\n }",
"def log_git_info():\n try:\n git_dir = Path('.git')\n head_file = git_dir / 'HEAD'\n with head_file.open() as f:\n head_contents = f.readline().strip()\n log.info(f'Contents of .git/HEAD: {head_contents}')\n if head_contents.split()[0] == 'ref:':\n hash_file = git_dir / head_contents.split()[1]\n with hash_file.open() as f:\n log.info(f'Current reference hash: {f.readline().strip()}')\n except FileNotFoundError:\n return",
"def log_git(repo_path: Union[pathlib.Path, str], repo_name: str = None):\n try:\n git_info = get_repo_info(repo_path)\n return git_info\n except Exception:\n logger.error(\"Was not able to read git information, trying to continue without.\")\n return {}",
"def getinfo(self, option):\n return _librepo.Result.getinfo(self, option)",
"def _determineInfo(self, path):\n \n entry = self._sharedState.getFromCache(path)\n if entry is None:\n try:\n entry = self._client.list(self._workingCopyPath + path, \n recurse=False)[0][0]\n entry = _Info(entry)\n self._sharedState.addToCache(path, entry)\n return entry\n except ClientError, error:\n raise SubversionError(error)\n return entry",
"def show(target, rev):\n\n assert os.path.exists(target), \"%s does not exist!\" % target\n git_tree = get_git_tree(target)\n bfr = None\n target = target.replace(git_tree, \"\", 1).lstrip(\"\\\\\" if _PLATFORM == \"windows\" else \"/\")\n\n if _PLATFORM == \"windows\":\n target = target.replace(\"\\\\\", \"/\")\n if git_tree is not None:\n bfr = gitopen([\"show\", \"%s:%s\" % (rev, target)], git_tree)\n return bfr",
"def info(self, *path):\n self._download_server_info()\n if self._info:\n return self._info.get(path, {})\n path = list(path)\n path[-1] += \".info\"\n t = self._open(*path)\n if t.status_code == 200:\n return json.loads(t.text)\n else:\n return {}",
"def _resolve_entry(self, path):\n upath = pycompat.fsdecode(path)\n ent = None\n if path in self._pending_changes:\n val = self._pending_changes[path]\n if val is None:\n raise KeyError\n return val\n t = self._tree\n comps = upath.split('/')\n te = self._tree\n for comp in comps[:-1]:\n te = te[comp]\n t = self._git_repo[te.id]\n ent = t[comps[-1]]\n if ent.filemode == pygit2.GIT_FILEMODE_BLOB:\n flags = b''\n elif ent.filemode == pygit2.GIT_FILEMODE_BLOB_EXECUTABLE:\n flags = b'x'\n elif ent.filemode == pygit2.GIT_FILEMODE_LINK:\n flags = b'l'\n else:\n raise ValueError('unsupported mode %s' % oct(ent.filemode))\n return ent.id.raw, flags",
"def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None",
"def info(self, *path):\n target = self.localpath(*path)\n return _open_file_info(target + '.info')",
"def get_git_info():\n\n diff = \"Could not extract diff\"\n githash = '00000'\n try:\n # Refers to the global qc_config\n PycQEDdir = pq.__path__[0]\n githash = subprocess.check_output(['git', 'rev-parse',\n '--short=10', 'HEAD'], cwd=PycQEDdir)\n diff = subprocess.run(['git', '-C', PycQEDdir, \"diff\"],\n stdout=subprocess.PIPE).stdout.decode('utf-8')\n except Exception:\n pass\n return githash, diff",
"def display(self, contents=False, recurse=False): # FileObj.display\n print '# File\\t\\t' + str(self.deleted) + '\\t' + str(self.ignore) + '\\t' + str(self.depth) + '\\t' + self.hexdigest + ' ' + self.pathname + ' '",
"def status(self, current_path):\n p = Popen(\n [\"git\", \"status\", \"--porcelain\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n line_array = my_output.decode(\"utf-8\").splitlines()\n for line in line_array:\n to1 = None\n from_path = line[3:]\n if line[0] == \"R\":\n to0 = line[3:].split(\" -> \")\n to1 = to0[len(to0) - 1]\n else:\n to1 = line[3:]\n if to1.startswith('\"'):\n to1 = to1[1:]\n if to1.endswith('\"'):\n to1 = to1[:-1]\n result.append(\n {\"x\": line[0], \"y\": line[1], \"to\": to1, \"from\": from_path}\n )\n return {\"code\": p.returncode, \"files\": result}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git status --porcelain\",\n \"message\": my_error.decode(\"utf-8\"),\n }",
"def _GetInfo(self, key):\r\n for line in RunShell([\"svn\", \"info\"]).splitlines():\r\n if line.startswith(key + \": \"):\r\n return line.split(\":\", 1)[1].strip()"
]
| [
"0.74270105",
"0.68815726",
"0.6252289",
"0.62171793",
"0.6192653",
"0.59567374",
"0.58447385",
"0.5779722",
"0.57291466",
"0.5719413",
"0.57182735",
"0.5684475",
"0.5651769",
"0.5619608",
"0.5599808",
"0.556769",
"0.5479113",
"0.54499125",
"0.54419017",
"0.54162186",
"0.5378484",
"0.5376298",
"0.53608155",
"0.5337636",
"0.53287363",
"0.5311206",
"0.5264388",
"0.52576",
"0.5252175",
"0.5243017"
]
| 0.7767432 | 0 |
Returns the SHA1 commit hash for the given reference. | def get_commit_hash(reference, directory=None):
# Track remote branch
if branch_exists(reference, local_only=False, directory=directory):
if not branch_exists(reference, local_only=True, directory=directory):
track_branches(reference, directory)
cmd = 'git show-branch --sha1-name ' + reference
out = check_output(cmd, shell=True, cwd=directory)
return out.split('[')[1].split(']')[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hash(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify', ref],\n cwd=repo).rstrip()",
"def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'",
"def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()",
"def cmd_get_sha(ref):\n return ['git', 'rev-parse', ref]",
"def git_sha1_commit():\n return local('git rev-parse --short HEAD', capture=True)",
"def get_sha_from_ref(repo_url, reference):\n # Using subprocess instead of convoluted git libraries.\n # Any rc != 0 will be throwing an exception, so we don't have to care\n out = subprocess.check_output(\n [\"git\", \"ls-remote\", \"--exit-code\", repo_url, reference]\n )\n # out is a b'' type string always finishing up with a newline\n # construct list of (ref,sha)\n refs = [\n (line.split(b\"\\t\")[1], line.split(b\"\\t\")[0])\n for line in out.split(b\"\\n\")\n if line != b\"\" and b\"^{}\" not in line\n ]\n if len(refs) > 1:\n raise ValueError(\n \"More than one ref for reference %s, please be more explicit %s\"\n % (reference, refs)\n )\n return refs[0][1].decode(\"utf-8\")",
"def hash(self):\n return os.popen('git rev-parse HEAD').read().strip()",
"def get_git_hash(rev='HEAD'):\n\n git_hash = ''\n try:\n git_out = subprocess.check_output(['git', 'rev-parse', rev], universal_newlines=True)\n except subprocess.CalledProcessError:\n mylogger.exception(\"Couldn't determine the git hash!\")\n else:\n git_hash = git_out.strip()\n\n return git_hash",
"def commit_hash(self):\n return self._commit_hash",
"def commit_of_ref(self, ref):\n # Check cache\n if ref in self._refs:\n return self._refs[ref]\n\n commit = self._get_commit_from_ref(ref)\n self._refs[ref] = \"\"\n if commit:\n self._refs[ref] = str(commit.id)\n\n return self._refs[ref]",
"def get_git_hash() -> Optional[str]:\n rv = _git('rev-parse', 'HEAD')\n if rv:\n return rv[:6]",
"def get_commit_hash(self, directory):\n\n return (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=directory)\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )",
"def get_hash(content):\n return hashlib.sha1(content).hexdigest()",
"def get_git_revision_hash():\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')",
"def get_commit_hash(repo_location, commit='origin/HEAD'):\n if not os.path.exists(pjoin(repo_location, '.git')):\n raise ValueError\n ret, out = spawn_get_output(\n ['git', 'rev-parse', commit], cwd=repo_location)\n if ret != 0:\n raise ValueError(\n f'failed retrieving {commit} commit hash '\n f'for git repo: {repo_location}')\n return out[0].strip()",
"def sha1(self) -> str:\n return self.data.sha1",
"def _get_commit_sha() -> str:\n repo_root = os.path.join(os.path.dirname(__file__), '..', '..')\n repo = Repo(repo_root)\n if repo.is_dirty():\n warning_msg = 'The git repo is dirty. The commit sha for source code links will be incorrect.'\n if os.environ.get('CI', '0') == '0':\n # If developing locally, warn.\n warnings.warn(warning_msg)\n else:\n # If on CI, error.\n raise RuntimeError(warning_msg)\n return repo.commit().hexsha",
"def get_git_hash(revname):\n try:\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()\n except:\n revname = \"origin/\" + revname\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()",
"def get_current_commit_sha():\n return check_output(\n \"git rev-parse HEAD\".split(\" \")\n ).decode('utf-8').strip()",
"def get_hash(self, params):\n return self.sha",
"def get_head_commit_hash(git_repo: Optional[Union[str, pathlib.Path]] = None\n ) -> str:\n if not git_repo:\n git_repo = get_chromium_src_path()\n\n if not isinstance(git_repo, pathlib.Path):\n git_repo = pathlib.Path(git_repo)\n\n _assert_git_repository(git_repo)\n\n return subprocess_utils.run_command(\n ['git', 'show', '--no-patch', f'--pretty=format:%H'], cwd=git_repo)",
"def get_git_revision_hash() -> str:\n try:\n # We are not interested in gits complaints\n git_hash = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, encoding=\"utf8\"\n )\n # ie. \"git\" was not found\n # should we return a more generic meta hash here?\n # like \"undefined\"?\n except FileNotFoundError:\n git_hash = \"git_not_available\"\n except subprocess.CalledProcessError:\n # Ditto\n git_hash = \"no_repository\"\n return git_hash.rstrip()",
"def _sha1(self):\n return hashlib.sha1(self._blob).hexdigest()",
"def get_current_commit_hash() -> FullCommitHash:\n return get_commit_hash(\"HEAD\")",
"def sha1(self):\n return self.tag(\"sha1\")",
"def get_commit_hash(revision: str) -> FullCommitHash:\n return FullCommitHash.of(popen(f\"git rev-parse {revision}\"))",
"def get_report_hash(self, consolidated):\n jsonstr = json.dumps(consolidated, sort_keys=True)\n hashobj = hashlib.sha1(jsonstr)\n hexval = hashobj.hexdigest()\n return hexval",
"def get_git_hash(git_dir, short=True):\n\n cwd = os.getcwd()\n os.chdir(git_dir)\n\n args = ['git', 'rev-parse', '--short', 'HEAD']\n if not short:\n args.remove('--short')\n\n ver = subprocess.check_output(args).strip('\\n')\n\n os.chdir(cwd)\n\n return ver",
"def calculate_hash(self):\n return sha256_2_string(str(self.header()))",
"def calculate_hash(self):\n return sha256_2_string(str(self.header()))"
]
| [
"0.7660623",
"0.725383",
"0.7150455",
"0.71380645",
"0.7114533",
"0.68922997",
"0.6765757",
"0.66595644",
"0.66213447",
"0.66117346",
"0.6610337",
"0.65057325",
"0.6426884",
"0.64247966",
"0.64152366",
"0.63599724",
"0.63132864",
"0.631321",
"0.6288229",
"0.62813455",
"0.62785685",
"0.62676406",
"0.62556356",
"0.6252314",
"0.62215626",
"0.62093776",
"0.6206994",
"0.6195915",
"0.6177936",
"0.6177936"
]
| 0.7786271 | 0 |
Returns True if the working branch has untracked files, False otherwise. | def has_untracked_files(directory=None):
out = check_output('git status', shell=True, cwd=directory)
if '# Untracked files:' in out:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_git_dirty():\n dirty_status = local('git diff --quiet || echo \"*\"', capture=True)\n if dirty_status == '*':\n return True\n\n untracked_count = int(local('git status --porcelain 2>/dev/null| grep \"^??\" | wc -l', capture=True))\n if untracked_count > 0:\n return True\n\n return False",
"def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0",
"def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False",
"def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0",
"def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True",
"def has_unsaved_changes(self):\n return self._file_content != self.buffer.text",
"def check_working_tree():\n result = _subprocess(['git', '--no-pager', 'diff', '--ignore-submodules=untracked'])\n if result:\n print(result)\n print(f\"Warning: Working tree contains changes to tracked files. Please commit or discard \"\n f\"your changes and try again.\")\n exit(1)",
"def has_changes(directory=None):\n out = check_output('git status', shell=True, cwd=directory)\n if 'nothing to commit (working directory clean)' in out:\n return False\n if 'nothing to commit, working directory clean' in out:\n return False\n if 'nothing to commit, working tree clean' in out:\n return False\n if 'nothing added to commit' in out:\n return False\n return True",
"def tracked(path):\n return not any(fnmatch(part, pattern) for pattern in untracked for part in path.split(os.sep))",
"def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False",
"def verify_working_tree_is_clean(self):\n logging.info('--- Verify working tree is clean ---')\n tree_status_output = self.git.status(\n '--porcelain', '--untracked-files=no')\n if tree_status_output.strip():\n gitwrapper.exit_with_error(\n 'You have local pending changes:\\n%s\\n'\n 'The working tree must be clean in order to continue.',\n tree_status_output)\n #",
"def has_staged_changes(repo):\n return subprocess.call(['git', 'diff-index', '--cached', '--quiet', 'HEAD'],\n cwd=repo) != 0",
"def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)",
"def _is_tracked(filename, metadata):\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha",
"def local_branch_exists(self, branch):\n return branch in self.repo.branches",
"def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 0",
"def has_unsaved_changes():\n return False",
"def __is_file_in_working_directory(self, filename) -> bool:\n return os.path.exists(os.path.join(self.__directory.working_path,\n filename))",
"def is_staging_clean() -> bool:\n c = cmd.run(\"git diff --no-ext-diff --cached --name-only\")\n return not bool(c.out)",
"def has_unsaved_changes(self):\n # TODO\n pass",
"def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty",
"def repo_has_incoming(*repo_paths):\n incoming = False\n\n for repo_path in repo_paths:\n try:\n subprocess.check_output(['hg', 'incoming', '-R', repo_path])\n print('Detected incoming changesets in \"{}\"'.format(repo_path))\n incoming = True\n except subprocess.CalledProcessError as e:\n if e.returncode != 1:\n raise\n\n return incoming",
"def prune_empty(self): # FileObj.prune_empty\n return False # can't prune a file",
"def untracked_files():\n res = run(\n \"cd %s ; git status\" % (SOURCE_ABSOLUTE),\n stdout=PIPE, stderr=PIPE,\n universal_newlines=True,\n shell=True\n )\n result = [line.strip() for line in res.stdout.split(\"\\n\")]\n\n files = [file\n for file in result if (file.endswith(\".txt\")\n and not (file.startswith(\"new file\") or\n file.startswith(\"deleted\") or file.startswith(\"modified\")))]\n\n return files",
"def is_empty(self): # DirObj.is_empty\n\n for fileName, fileEntry in self.files.iteritems():\n if not fileEntry.deleted and not fileEntry.ignore:\n #print '# ' + self.pathname + ' is not empty due to a file ' + fileEntry.name\n return False\n\n for dirName, subdir in self.subdirs.iteritems():\n if not subdir.deleted and not subdir.is_empty() and not subdir.ignore:\n #print '# ' + self.pathname + ' is not empty due to a dir ' + subdir.name\n return False\n\n #print '# ' + self.pathname + ' is empty!'\n return True",
"def _is_ignored(self, full_path):\n for ignor in self._ignored:\n if fnmatch.fnmatch(full_path, \"*/\" + ignor):\n return True\n return False",
"def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True",
"def is_git_repo(directory):\n files = os.listdir(directory)\n if '.git' in files:\n return True\n return False",
"def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")",
"def in_file(self):\n return self.on_disk and not self.in_cached_file"
]
| [
"0.7369675",
"0.7245063",
"0.7186721",
"0.6902411",
"0.69018334",
"0.6766072",
"0.6752072",
"0.67305964",
"0.6614926",
"0.66121066",
"0.6557952",
"0.6492854",
"0.6405835",
"0.64045507",
"0.62953687",
"0.626833",
"0.6249092",
"0.6208174",
"0.61656004",
"0.6162128",
"0.6151879",
"0.6133084",
"0.61208725",
"0.6120024",
"0.61091876",
"0.60929286",
"0.6032306",
"0.6006981",
"0.5995502",
"0.5966363"
]
| 0.8167415 | 0 |
Returns True if the given tag exists, False otherwise. | def tag_exists(tag, directory=None):
return tag in get_tags(directory) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_tag(self, tag):\n return tag in self.tags",
"def has_tag(self, tag):\n return tag in self.tags",
"async def exists(self, tag_name):\n try:\n if await self.get_id(tag_name):\n return True\n except RtbDoesntExists:\n return False",
"def has(self, tag_name: str) -> bool:\n return hasattr(self, tag_name)",
"def check_tag(self, session, tag):\n if not tag:\n return False\n\n try:\n self._tag(session.get, key=tag, session=session)\n return True\n except exceptions.NotFound:\n return False",
"def has(self, tag, index):\n return self.get(tag, index) is not None",
"def is_tag_available(self, tag):\n return tag in self.available_tags",
"def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])",
"def tag_key_exists(self, key):\n return key in self.map",
"def is_existed_meta_tag(meta_tag_id):\n result = MetaTag.query.filter_by(id=meta_tag_id).first()\n return result is not None",
"def IsTagExists(Service, ResourceId, TagName):\n\n try:\n Tag = AwsTag(Service)\n if Tag.IsTagExists(ResourceId, TagName):\n return True\n except Exception as e:\n raise e\n\n return False",
"def has_tag(javadoc=None, tag=None):\n\n if javadoc is None:\n return False\n\n if tag is None:\n raise ValueError('No tag supplied')\n\n if not re.search('@[a-zA-Z]+', tag):\n raise ValueError('invalid tag format')\n\n return True if re.search(tag, javadoc, re.DOTALL) else False",
"def hasEmbedded(self, tag):\n if self.embeddedTags and self.embeddedTags[-1] == tag:\n return True\n else:\n return False",
"def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in self._tags:\n if tag == desired_tag:\n return True\n\n return False",
"def has(self, tag_name: str, category: ty.Optional[str] = None) -> bool:\n tags = self.__holder.db_tags.filter(lambda t: t.name == tag_name)\n if category is not None:\n tags = tags.filter(category=category)\n\n return len(tags) >= 1",
"def tag_dict_contains (self,\r\n tag):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('TAGDICT CONTAINS')\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT rowid \"\r\n +\"FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(tag) in self.tag_dict",
"def tag_exists(form, field):\n if Tags.select().where(Tags.tag ** field.data).exists():\n raise ValidationError('That tag already exists.')",
"def is_tagged(self, instance_id, tag_name):\n tag_value = self.get_tag_for_instance(instance_id, tag_name)\n if tag_value is not None and tag_value == 'true':\n return True\n else:\n return False",
"def is_tag(t):\n return len(t) > 1 and t.startswith('#') and not t.startswith('##') and t",
"def check_photo_tag(self, tag_name):\n data = self.db.make_query(\n '''select * from photo_tag where tag_name = \"{}\" '''\n .format(tag_name))\n\n if len(data) > 0:\n return True\n return False",
"def has_tags(self):\n return bool(self.tags)",
"def make_tag_available(self, tag):\n if not self.is_tag_available(tag):\n self.available_tags.append(tag)\n return True\n return False",
"def verify_tag(tag, session):\n taginfo = session.getTag(tag)\n if not taginfo:\n raise RuntimeError('tag %s is not present in Koji' % tag)",
"def hasname(self, tag: str) -> bool:\n for key in self.formal_names:\n if key in tag.lower():\n return True\n\n # Exit case if key -> value not in mapping \n return False",
"def is_tag(tag_name, user_path, current_user) -> bool:\n user = current_user[0]\n tag_list = os.listdir((user_path + '\\\\' + user).encode('unicode_escape'))\n temp = list(map(bytes.decode, tag_list))\n if tag_name in temp:\n return True\n else:\n return False",
"def IsTagExists(self, ResourceId, TagName):\n\n try:\n if self.Service == 'ec2':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 's3':\n response = self.GetBucketTagging(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagSet']])):\n return True\n elif self.Service == 'lambda':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'logs':\n response = self.ListTagsLogGroup(ResourceId)\n if TagName in [x for x in response['tags']]:\n return True\n elif self.Service == 'rds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'es':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'emr':\n response = self.DescribeCluster(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [Tag for Tag in response['Cluster']['Tags']])):\n return True\n elif self.Service == 'dynamodb':\n response = self.ListTagsOfResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'firehose':\n response = self.ListTagsForDeliveryStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'glacier':\n response = self.ListTagsForVault(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'kms':\n response = self.ListResourceTags(ResourceId)\n if TagName in list(map(lambda x: x['TagKey'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'apigateway':\n print('No api to list tags')\n return False\n elif self.Service == 'kinesis':\n response = self.ListTagsForStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudtrail':\n response = self.ListTags(ResourceId)\n TagsList = map(lambda RTL: RTL['TagsList'], [RTL for RTL in response['ResourceTagList']])\n for Tags in TagsList:\n for Tag in Tags:\n if Tag['Key'] == 'Channel':\n return True\n elif self.Service == 'sqs':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'secretsmanager':\n response = self.DescribeSecret(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudfront':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'efs':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'sagemaker':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'redshift':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'elasticache':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif 
self.Service == 'workspaces':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'ds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'dax':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'route53':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'directconnect':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'datapipeline':\n response = self.DescribePipelines(ResourceId)\n Tags = list(map(lambda x: x['tags'], [tags for tags in response['pipelineDescriptionList']]))\n for i in Tags:\n for j in i:\n if j['key'] == 'Channel':\n return True\n else:\n raise TagNotSupportedError(self.Service)\n except Exception as e:\n raise e\n\n return False",
"def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def image_exists(self, id=None, tag=None):\n exists = False\n if id and self.image_by_id(id):\n exists = True\n elif tag and self.image_by_tag(tag):\n exists = True\n\n return exists"
]
| [
"0.8192115",
"0.8192115",
"0.81442606",
"0.7918354",
"0.77401036",
"0.75310254",
"0.7446307",
"0.74178255",
"0.7344889",
"0.732079",
"0.72747904",
"0.7171363",
"0.71429306",
"0.7109353",
"0.7026018",
"0.67887926",
"0.6704246",
"0.6700222",
"0.6663846",
"0.66027105",
"0.6597093",
"0.6595398",
"0.6592382",
"0.65278286",
"0.6525453",
"0.648128",
"0.64781743",
"0.63847893",
"0.63847893",
"0.63773954"
]
| 0.8404489 | 0 |
Deletes a given remote tag. | def delete_remote_tag(tag, remote='origin', directory=None):
execute_command('git push {0} :{1}'.format(remote, tag), shell=True,
cwd=directory) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_remove(self, remote_path, corpus_id, tag, storage_id=None):\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n return client.tag_remove(corpus_id, tag)",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def delete_tag(tag):\n tag.destroy()",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def delete_remote(self, remote: \"Remote\") -> str:\n return Remote.remove(self, remote)",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response",
"async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def delete(self, remote):\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"DELETE\",\n file_path = remote)",
"def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)",
"def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])",
"def remove_tag(tag_id):\n tag = Tags.query.get(tag_id)\n db_session.delete(tag)\n db_session.commit()\n return 'Tag #%s (%s) has been deleted.' % (tag_id, tag.tag), 'success'",
"def delete_tag(self, *tags: TagReference) -> None:\n return TagReference.delete(self, *tags)",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def delete_tag(user_id, tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(f'/users/{user_id}')",
"def delete_tag(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(\"/tags\")",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(\"/tags\")",
"def delete_tag(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(f\"/tags\")",
"def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)",
"def test_delete_tag(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.post(f\"/tags/{self.tag.id}/delete\", follow_redirects=True)\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertNotIn(\"Marvel\", html)",
"def delete(self, name):\n err = C.git_remote_delete(self._repo._repo, to_bytes(name))\n check_error(err)",
"def DeleteForTag(cls, tag):\n parent_key = cls._GetParentKeyFromTag(tag)\n frontend_job = cls.query(ancestor=parent_key).get(keys_only=True)\n if frontend_job:\n frontend_job.delete()",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def delete_remote():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))"
]
| [
"0.7718818",
"0.7641354",
"0.7475749",
"0.74589896",
"0.7241543",
"0.72296864",
"0.7126731",
"0.6996629",
"0.69694495",
"0.6888142",
"0.68675804",
"0.6833233",
"0.6710165",
"0.66714925",
"0.6669892",
"0.66515905",
"0.66195464",
"0.66030765",
"0.65793186",
"0.65521675",
"0.65470564",
"0.6546463",
"0.65375143",
"0.64961696",
"0.642145",
"0.64012396",
"0.63985986",
"0.6351909",
"0.6337427",
"0.6307723"
]
| 0.7991468 | 0 |
Returns a list of tags in the git repository. | def get_tags(directory=None):
out = check_output('git tag -l', shell=True, cwd=directory)
return [l.strip() for l in out.splitlines()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_list_tags(cs, args):\n resp, tags = cs.repositories.list_tags(args.repository)\n tags = [{\"Tag\": t} for t in tags]\n utils.print_list(tags, [\"Tag\"], sortby=\"Tag\")",
"def get_tags(self):\n tags = []\n for image in self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags",
"def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result",
"def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)",
"def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])",
"def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)",
"def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")",
"def tags(self) -> List[str]:\n if \"RepoTags\" in self.attrs:\n return [tag for tag in self.attrs[\"RepoTags\"] if tag != \"<none>:<none>\"]\n return []",
"def git_list_tags(tags=None,\n tagrgx=TAGRGX_DEFAULT,\n append_tags=None,\n git_cmd=git_cmd,\n heading_level=heading_level,\n include_cmd=include_cmd,\n ):\n git_list_tags_cmd = git_cmd[:] + ['tag', '-l']\n\n if tags is None:\n\n if True:\n git_get_first_rev_cmd = [\n 'rev-list', '--all', '--reverse', '--abbrev-commit'] #|head -n 1\n cmd = git_cmd + git_get_first_rev_cmd\n first_rev_output = subprocess.check_output(cmd).splitlines()\n if not first_rev_output:\n raise Exception(('no first revision found:',\n ('cmd', cmd),\n ('output', first_rev_output)))\n else:\n yield first_rev_output[0].rstrip()\n\n tag_output = subprocess.check_output(git_list_tags_cmd).splitlines()\n logging.debug(('tag_output', tag_output))\n\n # import semantic_version\n versiontags = []\n for x in tag_output:\n x = str(x)\n if re.match(tagrgx, x):\n if x.startswith('v'):\n _x = x[1:]\n elif x.startswith('release/'):\n _x = x[7:]\n else:\n _x = x\n ver = semantic_version.Version(_x.rstrip())\n versiontags.append((ver, x))\n for version, _tag in sorted(versiontags):\n yield _tag\n if append_tags:\n for _tag in append_tags:\n yield _tag",
"def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')",
"def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)",
"def tags(self) -> List[str]:\n return self._db_data.tags",
"def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')",
"def list_tags(self, session):\n result = self._tag(session.get, session=session)\n return result['tags']",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def find_tags(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n tags = []\n for tag, tag_id in [(t, ref_dict[t]) for t in repo.tags]:\n obj, obj_id = repo.repo[tag_id], None\n if isinstance(obj, Tag):\n _, obj_id = obj.object\n if isinstance(obj, Commit):\n obj_id = obj.id\n if commit.id == obj_id:\n tags.append((tag, obj))\n return tags",
"def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))",
"def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]",
"def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)",
"def tags(self) -> Sequence[str]:\n return pulumi.get(self, \"tags\")",
"def get_tags(self):\n return self.tags",
"def list_tags():\r\n tags = Tag.query.order_by(Tag.name).all()\r\n return render_template('tags.html', tags=tags)",
"def tags(self):\n return self._changeset.get('tags', None)",
"def get_tags(self):\n\n return self.tags",
"def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")",
"def get_tags(self, tags):\n tag_list = []\n for tag in tags:\n tag_list.append(tag[\"name\"])\n return tag_list",
"def tags(self) -> List[Tag]:\n return self._tags",
"def list_tags():\n\n tags = Tag.query.all()\n return render_template('tags/list_tags.html', tags=tags)"
]
| [
"0.7660419",
"0.76550305",
"0.7644801",
"0.75672406",
"0.74624276",
"0.73732084",
"0.7258995",
"0.72259563",
"0.72047967",
"0.71400553",
"0.70663697",
"0.7060839",
"0.7045013",
"0.6962905",
"0.6937564",
"0.6937564",
"0.6908174",
"0.688024",
"0.68553925",
"0.68434227",
"0.68312985",
"0.67926425",
"0.6787339",
"0.67489797",
"0.6734384",
"0.6704979",
"0.66833764",
"0.66577333",
"0.6654247",
"0.6611243"
]
| 0.7836319 | 0 |
Checks that you are in the root of the git repository, else exit. | def ensure_git_root():
root = get_root()
if root is None:
error("Not in a git repository.", exit=True)
if os.getcwd() != root:
error("Must call from the top folder of the git repository",
exit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def navigate_to_git_root() -> bool:\n dir_climb_count = 0\n continue_dir_traverse = True\n while continue_dir_traverse:\n if not Utils.contains_dir('.git'):\n print(f\"Current dir {os.getcwd()} is not a Git repository.\")\n # Change directory up one level.\n os.chdir(\"../\")\n dir_climb_count += 1\n else:\n print(f\"Current dir {os.getcwd()} is a recognized Git repository.\")\n return True\n\n if dir_climb_count > 3:\n continue_dir_traverse = False\n\n if not Utils.contains_dir('.git'):\n logging.error('Unable to locate Git repository.')\n\n return False",
"def checkGit(directory):",
"def check_run_in_root(path):\n candidate = pl.Path.cwd() / path\n for _ in candidate.glob(pattern='.gitattributes'):\n return\n for _ in candidate.glob(pattern='.svn'):\n return\n raise ValueError(f'{candidate} does not appear to be a git or svn root')",
"def check_working_tree():\n result = _subprocess(['git', '--no-pager', 'diff', '--ignore-submodules=untracked'])\n if result:\n print(result)\n print(f\"Warning: Working tree contains changes to tracked files. Please commit or discard \"\n f\"your changes and try again.\")\n exit(1)",
"def ensure_clean_repo(ctx):\n ctx.runprocess(['git', 'status', '--porcelain'],\n check_stdout='',\n check_stderr='',\n fail_message='Repository %s not clean' % os.getcwd())",
"def find_repo_root():\n path = os.getcwd()\n\n while \".git\" not in set(os.listdir(path)) and path != \"/\":\n path = os.path.dirname(path)\n\n if path == \"/\":\n raise Exception(\"No repo found, stopping at /\")\n\n return path",
"def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())",
"def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")",
"def check_workspace ():\n\n try:\n ex (\"cd $DOC_ROOT/ACE_TAO && git pull -p\")\n print (\"Successfully updated ACE/TAO working copy\")\n except:\n print (\"Unable to update ACE/TAO workspace at \" + doc_root)\n raise\n\n try:\n ex (\"cd $DOC_ROOT/MPC && git pull -p\")\n print (\"Successfully updated MPC working copy to revision \")\n except:\n print (\"Unable to update the MPC workspace at \" + doc_root + \"/ACE/MPC\")\n raise\n\n vprint (\"Repos root URL = \" + opts.repo_root + \"\\n\")\n vprint (\"Repos MPC root URL = \" + opts.mpc_root + \"\\n\")",
"def need_to_install_git(args, git_directory):\n if args.force:\n return True\n git_exe_path = os.path.join(git_directory, 'bin', 'git.exe')\n if not os.path.exists(git_exe_path):\n return True\n if subprocess.call(\n [git_exe_path, '--version'],\n stdout=DEVNULL, stderr=DEVNULL) != 0:\n return True\n for script in ('git.bat', 'gitk.bat', 'ssh.bat', 'ssh-keygen.bat',\n 'git-bash'):\n full_path = os.path.join(ROOT_DIR, script)\n if not os.path.exists(full_path):\n return True\n with open(full_path) as f:\n if os.path.relpath(git_directory, ROOT_DIR) not in f.read():\n return True\n if not os.path.exists(os.path.join(\n git_directory, 'etc', 'profile.d', 'python.sh')):\n return True\n return False",
"def is_git():\n return exists('.git') and not islink('.git')",
"def _check_inputs(self):\n\n if not os.path.isdir(self._parent_repo):\n raise Error('Invalid parent repo path %r' % self._parent_repo)\n\n self._run_git_command(['--help'], error_message='Unable to run git')\n self._run_git_command(['status'],\n error_message='%r is not a valid git repo' %\n os.path.abspath(self._parent_repo))\n self._run_git_command(['fetch', 'origin'],\n error_message='Failed to fetch origin')\n self._run_git_command(\n ['rev-parse', '%s^{commit}' % self._branch_ref],\n error_message='Branch %s not found' % self._branch_ref)\n self._run_git_command(\n ['rev-parse', '%s^{commit}' % self._revision],\n error_message='Revision \"%s\" not found' % self._revision)",
"def check_root():\n if os.getuid():\n logging.critical(\"Please run as root.\")\n sys.exit(ExitCode.ROOT_REQUIRED)",
"def check_repo(self):\n if not os.path.exists(self.path):\n log.error(\"no dots repository found at '{}'\".format(self.path))\n if not os.path.exists(self.files_path):\n log.error(\"corrupted repository, the 'files' subfolder is missing\")\n if not os.path.exists(self.enc_files_path):\n log.error(\"corrupted repository, the 'encrypted' subfolder is missing\")\n if not os.path.exists(os.path.join(self.path, '.git')):\n log.error(\"corrupted repository, folder exists but is not versioned\")\n self.git_repo = Repo(self.path)",
"def _ensure_directory(self):\n curr_working_dir = os.getcwd()\n if os.path.basename(curr_working_dir) != REPO_NAME:\n if REPO_NAME not in curr_working_dir:\n # TODO: should we clone the git repo instead?\n raise RuntimeError(\n \"Cannot find the antenny repository, please run this from the root \"\n \"of that directory.\"\n )\n # walk back up the directory tree, it's in the current working dir\n while os.path.basename(os.getcwd()) != REPO_NAME:\n os.chdir(UP_ONE_DIRECTORY)\n os.chdir(STATION_CODE_RELATIVE_PATH)",
"def is_repo_root(path: str) -> bool:\n return os.path.isdir(os.path.join(path, \".repo\"))",
"def _check_repository(self):\n if not os.path.exists(\"%s/.git\" % self._repository_path):\n Repo.clone_from(self.REPOSITORY_ADDRESS, self._repository_path)\n\n self._repo = Repo(self._repository_path)\n self._pull()",
"def test_should_raise_if_git_repo_not_exists(self): # pylint: disable=invalid-name\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n \"Directory `basedir` isn't git repository root.\" in err_msg)",
"def git_available():\n null = open(\"/dev/null\", \"w\")\n subprocess.Popen(\"git\", stdout=null, stderr=null)\n null.close()",
"def gitroot(dir=\"\"):\n # Supress errors from Git\n git_cmd = \"git rev-parse --show-toplevel \" + dir + \" 2> \" + os.devnull\n if dir:\n original_cwd = os.getcwd()\n os.chdir(dir)\n try:\n sub_out = subprocess.check_output(git_cmd, shell=True)\n cmd_out = sub_out.decode().rstrip(). splitlines()[0]\n except:\n cmd_out = \"\"\n if dir:\n os.chdir(original_cwd)\n return cmd_out",
"def check_working_directory():\n if not os.path.exists(CUCKOO_ROOT):\n raise CuckooStartupError(\"You specified a non-existing root directory: %s\" % CUCKOO_ROOT)\n\n cwd = os.path.join(os.getcwd(), \"cuckoo.py\")\n if not os.path.exists(cwd):\n raise CuckooStartupError(\"You are not running Cuckoo from it's root directory\")",
"def verify_working_tree_is_clean(self):\n logging.info('--- Verify working tree is clean ---')\n tree_status_output = self.git.status(\n '--porcelain', '--untracked-files=no')\n if tree_status_output.strip():\n gitwrapper.exit_with_error(\n 'You have local pending changes:\\n%s\\n'\n 'The working tree must be clean in order to continue.',\n tree_status_output)\n #",
"def git():\n pass",
"def check_in_repo():\n if not os.path.isfile(\"setup.py\"):\n return \"Not in root-level PyTorch repo, no setup.py found\"\n with open(\"setup.py\") as f:\n s = f.read()\n if \"PyTorch\" not in s:\n return \"Not in PyTorch repo, 'PyTorch' not found in setup.py\"",
"def test_no_remotes():\n os.chdir(master_path)\n\n from PyGitUp.gitup import GitUp\n\n with pytest.raises(GitError):\n GitUp(testing=True)",
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)",
"def main():\n # Parse command line arguments\n configfile = parse_arguments()\n # Parse config file\n (basedir, gituser, add_own_forks, forks, branches) = parse_config(configfile)\n # Check that base directory exists\n if not os.path.exists(basedir):\n raise Exception('Base directory {0} does not exist'.format(basedir))\n # Configure working directory\n workdir = setup_workdir(basedir)\n # Check out the code\n checkout_code(workdir, gituser, add_own_forks, forks, branches)\n print \"Location of code: {0}\".format(workdir)",
"def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)",
"def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))",
"def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())"
]
| [
"0.76070565",
"0.7348511",
"0.70405227",
"0.6678482",
"0.65229124",
"0.64519316",
"0.6368765",
"0.6358901",
"0.63172734",
"0.6314767",
"0.6281854",
"0.62632245",
"0.62627625",
"0.6222363",
"0.6149592",
"0.61405575",
"0.61228436",
"0.6066111",
"0.59869486",
"0.5963188",
"0.5961374",
"0.5950504",
"0.5938922",
"0.5921397",
"0.59148",
"0.59094983",
"0.589846",
"0.5859553",
"0.58548963",
"0.5845688"
]
| 0.85459024 | 0 |
Tracks all specified branches. | def track_branches(branches=None, directory=None):
if type(branches) == str:
branches = [branches]
debug("track_branches(" + str(branches) + ", " + str(directory) + ")")
if branches == []:
return
# Save the current branch
current_branch = get_current_branch(directory)
try:
# Get the local branches
local_branches = get_branches(local_only=True, directory=directory)
# Get the remote and local branches
all_branches = get_branches(local_only=False, directory=directory)
# Calculate the untracked branches
untracked_branches = []
for branch in all_branches:
if branch.startswith('remotes/'):
if branch.count('/') >= 2:
branch = '/'.join(branch.split('/')[2:])
if branch not in local_branches:
untracked_branches.append(branch)
# Prune any untracked branches by specified branches
if branches is not None:
branches_to_track = []
for untracked in untracked_branches:
if untracked in branches:
branches_to_track.append(untracked)
else:
branches_to_track = untracked_branches
# Track branches
debug("Tracking branches: " + str(branches_to_track))
for branch in branches_to_track:
checkout(branch, directory=directory)
finally:
if current_branch:
checkout(current_branch, directory=directory) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_branches(self):\n logging.info('--- Get Branches ---')\n self.local_branches = set(self.find_branches())\n self.remote_branches = set(self.find_branches(remote=True))\n # Tags are remote branches that start with \"tags/\".\n self.tags = {\n single_branch for single_branch in self.remote_branches\n if PRX_SVNTAGS_PREFIX.match(single_branch)}",
"def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])",
"def branches(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"branches\", _args)\n return _ctx.execute_sync(list[str])",
"def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])",
"def __gitBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False)",
"def list_all_branches(self) -> dict:\n try:\n branches_response = self.repo.get_branches()\n branches_list = []\n for branch in branches_response:\n branches_list.append(branch.raw_data.get('name'))\n return make_success_response(200, branches_list)\n except GithubException as github_exc:\n return make_error_response(github_exc.status, github_exc.data)",
"def branches_full(config, args):\n for b in config.repo.branches():\n yield config.repo.branch(b.name)",
"def fetch_branches(self):\n for jrepo in self.json_repos['repos']:\n title = str(jrepo[\"title\"])\n self.branches[title] = str(jrepo['current'])",
"def branches(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'branches')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def branches(self):\r\n url = self.base_url + 'branches/'\r\n return json.loads(self.bb.load_url(url))",
"def branches(self):\r\n url = '{0}/branches/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json",
"def collectAllBranches(self):\n\t\tallBranches = []\n\n\t\tfor unit in self.inputLayer:\n\n\t\t\tfor branch in unit.branchesOut:\n\n\t\t\t\tallBranches.append(branch)\n\n\t\tfor layer in self.hiddenLayers:\n\n\t\t\tfor unit in layer:\n\t\t\t\n\t\t\t\tfor branch in unit.branchesOut:\n\n\t\t\t\t\tallBranches.append(branch)\n\n\t\tfor unit in self.outputLayer:\n\n\t\t\tfor branch in unit.branchesOut:\n\n\t\t\t\tallBranches.append(branch)\n\n\n\t\treturn allBranches",
"def list_branches(self) -> List[str]:\n self.__verify_repo_initialized()\n branches = heads.get_branch_names(self._env.branchenv)\n return branches",
"def get_branches(self, *, refs=[\"refs/heads\", \"refs/remotes\"]):\n # type: (Sequence[str]) -> List[Branch]\n stdout = self.git(\n \"for-each-ref\",\n (\n \"--format=\"\n \"%(HEAD)%00\"\n \"%(refname)%00\"\n \"%(upstream)%00\"\n \"%(upstream:remotename)%00\"\n \"%(upstream:track,nobracket)%00\"\n \"%(committerdate:unix)%00\"\n \"%(objectname)%00\"\n \"%(contents:subject)\"\n ),\n *refs\n ) # type: str\n branches = [\n branch\n for branch in (\n self._parse_branch_line(line)\n for line in filter_(stdout.splitlines())\n )\n if branch.name != \"HEAD\"\n ]\n store.update_state(self.repo_path, {\"branches\": branches})\n return branches",
"def branchize(self):\n near_ones = self.get_near_ones()\n\n if self.current_branch[\"g_score\"] == 31:\n return\n\n for item in near_ones:\n\n if self.current_branch.get(\"move\") and self.current_branch[\"move\"] == item:\n continue\n\n self.change(item)\n\n if self.astar:\n serialized = self.serialize()\n if serialized in self.previous_branches:\n self.change(item)\n continue\n else:\n self.previous_branches.append(serialized)\n\n a_branch = {\n \"status\" : True,\n \"move\" : item,\n \"g_score\" : self.current_branch[\"g_score\"] + 1,\n \"h_score\" : self.calc_manhattan(self.goal),\n \"branches\" : [],\n \"parent\" : self.current_branch\n }\n a_branch[\"f_score\"] = a_branch[\"g_score\"] + a_branch[\"h_score\"]\n\n self.current_branch[\"branches\"].append(a_branch)\n self.score_scheme.append((a_branch[\"f_score\"], a_branch))\n self.change(item)\n\n self.score_scheme.sort(key=lambda x: x[0])",
"def hard_reset_branches(args):\n checkout_branches(args)\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Hard resetting tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n repo.check_command([\"reset\", \"--hard\", project.remote_refspec])",
"def base_branches() -> list[str]:\n branches = []\n\n default = sh(\"git rev-parse --abbrev-ref origin/HEAD\").removeprefix(\"origin/\")\n branches.append(default)\n\n releases = sh(\n \"git branch --all --sort=-committerdate --list *release/* | head -10\"\n ).splitlines()\n releases = [b.removeprefix(\"*\").strip() for b in releases]\n branches.extend(releases)\n\n return branches",
"def list_branches(self, msg, args):\n trans = self._translation_util(msg)\n yield \"\\n\".join(trans.list_branches(REPOSITORY_NAME))",
"def create_branches(branches, pcoll, provider_options):\n\n logger.info('Branch count: %i' % len(branches))\n pcoll_tuple = ()\n\n for branch in branches:\n logger.info('Adding branch')\n output = create_graph(branch, pcoll, provider_options)\n pcoll_tuple = pcoll_tuple + (output,)\n\n logger.info('Transform: MergeBranches')\n output = pcoll_tuple | 'MergeBranches' >> MergeBranches()\n return output",
"def find_branches(self, remote=False):\n arguments = ['--no-color']\n if remote:\n arguments.append('-r')\n #\n for branch in self.git.branch(*arguments).splitlines():\n branch = branch.replace('*', '').strip()\n if branch:\n yield branch\n #\n #",
"def bouton_branches(self,arbor,arbor_labels,bbranches,bouton_geometry=[28,4]):\n\t\t\n\t\tfor branch in arbor.keys():\n\t\t\tif branch in bbranches:\n\t\t\t\tarbor[branch],arbor_labels[branch] = self.bouton_branch(arbor[branch],bouton_geometry)\n\t\t\n\t\treturn(arbor,arbor_labels)",
"def __gitMergedBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False,\n listAll=False, merged=True)",
"def __create_actions_for_all_branches(self, branches_steps, task_activities):\n for branch_action in branches_steps:\n action_factory = branch_factory.ActionFactory(xml_cache=self.xml_cache,\n mapping_cache=self.mapping_cache,\n input_data=self.input_data,\n action=branch_action,\n latency=self.latency,\n processor=self.processor\n ).create_action_factory(task_activities=task_activities)\n action_factory.add_action()",
"def show_branches(config, args):\n for item in lib.input_json_lines():\n yield config.repo.branch(item)",
"def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repositories/{self.owner}/{self.repo}/refs/branches'\n filter_param = {'fields': 'values.name'}\n response = self._get_request(branches_endpoint, filter_param)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n raise BitbucketRequestSenderExc(\n f'Invalid parameter(s) in: owner: {self.owner},'\n f' repo: {self.repo}')\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['name']\n } for branch in branches_page['values']\n ]",
"def _push_branches_(self, state, tape_cache, outputs):\n from copy import deepcopy\n\n self._push_branch_(state, tape_cache, outputs)\n if not self.check_epsilon_transitions:\n return\n if state._in_epsilon_cycle_(self.fsm):\n if not state._epsilon_cycle_output_empty_(self.fsm):\n raise RuntimeError(\n 'State %s is in an epsilon cycle (no input), '\n 'but output is written.' % (state,))\n\n for eps_state, eps_outputs in \\\n state._epsilon_successors_(self.fsm).iteritems():\n if eps_state == state:\n continue\n # \"eps_state == state\" means epsilon cycle\n # Since we excluded epsilon cycles where\n # output is written, this has to be one\n # which does not write output; therefore\n # skipped.\n for eps_out in eps_outputs:\n new_out = [o + list(eps_out) for o in outputs]\n self._push_branch_(eps_state, deepcopy(tape_cache), new_out)",
"def _listBranches(self):\n assert self.wc.exists('branches')\n branches = self.wc.ls('branches')\n\n # Some early release branches used a different naming scheme\n # that doesn't sort properly with new-style release names. We\n # filter those out here, along with empty lines.\n branches = [b.strip('/') for b in branches\n if MELANGE_RELEASE_RE.match(b.strip('/'))]\n\n return sorted(branches)",
"def sync_all_teams_coverage():\n teams = Team.objects.all()\n\n for team in teams:\n identifier = team.identifier\n\n sync_team_coverage.apply_async(args=(identifier, ))\n sync_team_cluster_stats.apply_async(args=(identifier, ))\n sync_team_advisory_stats.apply_async(args=(identifier, ))",
"def make_branches(self, api_json=None):\n if api_json is None:\n return []\n\n obj = simplejson.loads(api_json)\n branches = [item[\"commit\"][\"sha\"] for item in obj]\n\n print branches\n\n return branches",
"def merge(self, branch_names):\n\n self.git(\"merge\", *branch_names)"
]
| [
"0.5989476",
"0.5985131",
"0.5974778",
"0.58220303",
"0.5702252",
"0.5696388",
"0.5686802",
"0.5682091",
"0.5580505",
"0.541718",
"0.53673714",
"0.5365935",
"0.5262822",
"0.52213264",
"0.5179027",
"0.5177821",
"0.5168318",
"0.5141571",
"0.5116087",
"0.50699514",
"0.50658035",
"0.50291604",
"0.49918565",
"0.4982331",
"0.49335176",
"0.49253803",
"0.491829",
"0.48711726",
"0.4864259",
"0.4827366"
]
| 0.7209036 | 0 |
Returns the most recent, by date, tag in the given local git repository. | def get_last_tag_by_date(directory=None):
cmd = "git for-each-ref --sort='*authordate' " \
"--format='%(refname:short)' refs/tags/upstream"
output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)
output = output.splitlines()
if len(output) == 0:
return ''
return output[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def get_last_tag_by_version(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n tags = []\n versions = []\n for line in output.splitlines():\n tags.append(line.strip())\n ver = re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", line)\n if ver:\n versions.append(ver)\n return tags[versions.index(max(versions))] if versions else ''",
"def get_latest_build(tag, package):\n proc = Popen([\"osg-koji\", \"-q\", \"list-tagged\", \"--latest\", tag, package],\n stdout=PIPE)\n out = proc.communicate()[0] or b''\n ret = proc.returncode\n\n latest_build_line = out.decode(\"latin-1\").strip()\n\n if ret != 0 or not latest_build_line:\n return\n\n return latest_build_line.split()[0]",
"def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag",
"def do_latest_tag(args, image_name_tag, image_name):\n if args.latest is True:\n if tag(image_name_tag, image_name+':latest'):\n push(args, image_name+':latest')",
"def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version",
"def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)",
"def get_latest_sha(repo):\n cwd = os.getcwd()\n command = \"git rev-list -1 HEAD -- {0}\".format(repo)\n os.chdir(repo)\n git_sha = process_helpers.run(command.split(\" \"))\n os.chdir(cwd)\n return git_sha.strip()",
"def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']",
"def latest_tagged_video(tag):\n if not isinstance(tag, Tag):\n try:\n tag = Tag.objects.get(text=tag)\n except Tag.DoesNotExist:\n return mark_safe('')\n video = first_or_none(Video.objects.filter(tags=tag)\n .order_by('-issue__issue_date'))\n if video:\n return mark_safe(video.key)\n return mark_safe('')",
"def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date",
"def get_most_recent_tarball(self, pkg):\n pass",
"def get_first_last_commit_date(path):\n # %at specifies a UNIX time stamp\n process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n log = stdout.decode().strip('\\n').split('\\n')\n last = int(log[0])\n first = int(log[-1])\n return (first, last)",
"def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version",
"def repo_rev(self, repository):\n sql = sa.select([sa.func.max(history.c.rev)]).where(history.c.path.like(repository + '%'))\n result = self.engine.execute(sql).first()[0]\n if result is None:\n result = -1\n return result",
"def get_prs_merged_since(auth_token, repo, tag):\n tag_date = get_tag_date(tag)\n prs = []\n\n def merge_date(pr):\n if pr.get('merged_at'):\n return dateutil.parser.parse(pr['merged_at'])\n else:\n return None\n\n # The GitHub API does not provide a `since` parameter to retrieve PRs\n # closed since a given date, so instead we iterate over PRs in descending\n # order of last update and stop when we reach a PR that was last updated\n # before the given tag was created.\n for closed_pr in github_request(auth_token, repo, 'pulls', state='closed',\n sort='updated', direction='desc'):\n pr_date = dateutil.parser.parse(closed_pr['updated_at'])\n if pr_date < tag_date:\n break\n merged_at = merge_date(closed_pr)\n if merged_at and merged_at > tag_date:\n prs += [closed_pr]\n\n return sorted(prs, key=merge_date)",
"def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag",
"def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()",
"def getversion_git(path=None):\n _program_dir = path or _get_program_dir()\n cmd = 'git'\n try:\n subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()\n except OSError:\n # some Windows git versions provide git.cmd instead of git.exe\n cmd = 'git.cmd'\n\n with open(os.path.join(_program_dir, '.git/config')) as f:\n tag = f.read()\n # Try 'origin' and then 'gerrit' as remote name; bail if can't find either.\n remote_pos = tag.find('[remote \"origin\"]')\n if remote_pos == -1:\n remote_pos = tag.find('[remote \"gerrit\"]')\n if remote_pos == -1:\n tag = '?'\n else:\n s = tag.find('url = ', remote_pos)\n e = tag.find('\\n', s)\n tag = tag[(s + 6):e]\n t = tag.strip().split('/')\n tag = f\"[{t[0][:-1]}] {'-'.join(t[3:])}\"\n dp = subprocess.Popen([cmd, '--no-pager',\n 'log', '-1',\n '--pretty=format:\"%ad|%an|%h|%H|%d\"',\n '--abbrev-commit',\n '--date=iso'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n info, _ = dp.communicate()\n info = info.decode(config.console_encoding).split('|')\n date = info[0][:-6]\n date = time.strptime(date.strip('\"'), '%Y-%m-%d %H:%M:%S')\n dp = subprocess.Popen([cmd, 'rev-list', 'HEAD'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n rev, stderr = dp.communicate()\n rev = f'g{len(rev.splitlines())}'\n hsh = info[3] # also stored in '.git/refs/heads/master'\n if (not date or not tag or not rev) and not path:\n raise VersionParseError\n return (tag, rev, date, hsh)",
"def _get_tag(self, current_path, commit_sha):\n command = [\"git\", \"describe\", \"--tags\", commit_sha]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n elif \"fatal: no tags can describe '{}'.\".format(commit_sha) in error.decode(\n \"utf-8\"\n ).lower():\n return None\n elif \"fatal: no names found\" in error.decode(\"utf-8\").lower():\n return None\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )",
"def get_current_timestamp(path_to_repository):\n repo = Repo(path_to_repository)\n return repo.head.commit.committed_date",
"def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release",
"def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return latest",
"def get_git_version(path_to_repo):\r\n try:\r\n f = open(path_to_repo+'HEAD')\r\n ref = f.readline().strip().split(' ')\r\n f.close()\r\n except IOError, e:\r\n plog('NOTICE', 'Git Repo at %s Not Found' % path_to_repo)\r\n return ('unknown','unknown')\r\n try:\r\n if len(ref) > 1:\r\n f = open(path_to_repo+ref[1])\r\n branch = ref[1].strip().split('/')[-1]\r\n head = f.readline().strip()\r\n else:\r\n branch = 'detached'\r\n head = ref[0]\r\n f.close()\r\n return (branch, head)\r\n except IOError, e:\r\n pass\r\n except IndexError, e:\r\n pass\r\n plog('NOTICE', 'Git Repo at %s Not Found' % path_to_repo)\r\n return ('unknown','unknown')",
"def get_version_from_git(opts):\n\tstdout = opts.tag or Popen(gitargs, stdout=PIPE).communicate()[0].rstrip('\\n')\n\n\tversion, gitmeta = process_git_tag(opts.regex, stdout)\n\n\treturn version, gitmeta",
"def gettime(self, tag):\n cmd = ['git', 'log', '--pretty=format:\"%ct\"', \"-1\", tag]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n if data == b'':\n return [], []\n time_stamp = []\n this_tag = []\n for seconds in data.decode(\"utf-8\").split(\"\\n\"):\n month = round((int(seconds.strip('\"')) - ReleaseTime.base) / ReleaseTime.month_time)\n if month not in time_stamp:\n time_stamp.append(month)\n this_tag.append(tag[0:4])\n else:\n pass\n return time_stamp, this_tag",
"def get_repository_last_update_timestamp(api_1_0_url):\n repository_last_update_timestamp = ''\n try:\n r = requests.get(api_1_0_url)\n json_string = r.content\n data = json.loads(json_string)\n try:\n repository_last_update_timestamp = data['utc_last_updated']\n except Exception as error:\n print(\"Caught error: \" + repr(error))\n except Exception as error:\n print(\"Failed to connect to bitbucket: \" + repr(error))\n exit(1)\n return repository_last_update_timestamp",
"def get_tag(self, tag, filename):\n return self.get_tag_batch(tag, [filename])[0]",
"def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")",
"def compute_version_for_latest(project_name, org_name, repo_name, distro_name):\n #TODO: update for h turtle\n assert distro_name in ['fuerte', 'groovy']\n if distro_name == 'fuerte':\n release = 'lucid'\n else:\n release = 'precise'\n project_name = project_name.replace('_', '-')\n prefix = 'debian/ros-%s-%s_'%(distro_name, project_name)\n suffix = '_%s'%(release)\n tags = list_tags(org_name, repo_name, prefix)\n tags = [t[:-len(suffix)] for t in tags if t.endswith(suffix)]\n if not tags:\n return None\n print(\"TAGS\", [t[len(prefix):] for t in tags])\n \n versions = sorted([distutils.version.LooseVersion(t[len(prefix):]) for t in tags])\n if not versions:\n return None\n version = versions[-1].vstring #for pattern\n return '%s%s%s'%(prefix, version, suffix)"
]
| [
"0.6903574",
"0.6674831",
"0.65792495",
"0.6339654",
"0.63221633",
"0.63045645",
"0.61844766",
"0.61714214",
"0.60810655",
"0.6045296",
"0.60286623",
"0.5970002",
"0.5933507",
"0.5900507",
"0.5880155",
"0.58799756",
"0.587563",
"0.57677513",
"0.57312053",
"0.5720901",
"0.56939244",
"0.56842",
"0.5667226",
"0.56623787",
"0.5655025",
"0.5595687",
"0.5578365",
"0.5551528",
"0.5548849",
"0.55120504"
]
| 0.73158556 | 0 |
Delete specified scan ID in the OpenVAS server. | def delete_scan(self, scan_id):
self.__manager.delete_task(scan_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_scan(self, scanid, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/removeScan/', {'scanId': scanid, 'apikey': apikey})))",
"def delete(self, id: int):\n self._select_interface(self._rc_delete, self._http_delete, id)",
"def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')",
"def delete(self, _id):",
"def delete(self):\n key = f'https://plex.tv/devices/{self.id}.xml'\n self._server.query(key, self._server._session.delete)",
"def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))",
"def delete_interconnect(interconnectId=None):\n pass",
"def delete(self, application_id):",
"def delete_driver(driver_id):\n driver = utils.get_dict_by_key_value_from_list('id', driver_id, drivers)\n if driver:\n drivers.remove(driver)\n return jsonify({\"message\": \"The object was deteled successfully\"})\n return jsonify({\"message\": \"Object not found\"})",
"def delete(self, id):\n context = request.environ.get('context')\n dbapi.net_interfaces_delete(context, id)\n return None, 204, None",
"def delete(self, object_id):\n libplasma.delete(self.conn, object_id)",
"def delete(self, *, session_id, connection_id):\n\n response = openvidu().delete_connection(session_id, connection_id)\n if response.status_code == 204:\n return\n elif response.status_code == 400:\n abort(NotFound, query=f\"Session `{session_id}` does not exist\")\n elif response.status_code == 404:\n abort(NotFound, query=f\"Connection `{connection_id}` does not exist\")\n abort(response)",
"def delete(self, id=None):\n if id:\n id = str(urllib.unquote(id))\n public_key = PublicKey.get_by_id(long(id))\n if public_key:\n public_key.delete()\n self.response.write({'status' : 'success'})\n return\n else:\n self.abort(404)\n self.abort(400)",
"def delete(cls, id):\n raise Exception('Not Implemented Yet')",
"def delete():",
"def __Delete(self, url, id = None):\n\n conn = self.__GetConnection()\n if (id != None):\n url += \"/\" + str(id)\n conn.request(\"DELETE\", url, \"\", self.__MakeHeaders(True))\n response = conn.getresponse()\n self.__CheckResponse(response)",
"def delete(cls, aws_cloud_account_id: str):\n\t\tpass",
"def delete(self, id: int):\n\n del self.__clients[id]",
"def _delete_bridge(self, method, api, header, data):\n self._execute_api(method, api, header)",
"def delete(self, id):\n raise NotImplementedError",
"def deleteCard(self,id):\n if not self.cardExists(id):\n print(\"Card \"+id+\" doesn't exist\")\n return\n del self.cards[id]",
"def delete(self, id):\n self.not_supported()",
"def delete(self, id):\n self.not_supported()",
"def delete(self, id):\n self.not_supported()",
"def delete(self, id):\n self.not_supported()",
"def delete(self, id):\n self.not_supported()",
"def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE request\n return requests.delete(self.REQUEST_URL + str(self.args.id))",
"def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]",
"def fusion_api_delete_sas_interconnect(self, name=None, uri=None, api=None, headers=None):\n return self.sasics.delete(name=name, uri=uri, api=api, headers=headers)",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)"
]
| [
"0.64858025",
"0.62438697",
"0.62418026",
"0.6067558",
"0.5964057",
"0.59593976",
"0.5948016",
"0.5885676",
"0.58653855",
"0.5861436",
"0.5857621",
"0.5845131",
"0.58350873",
"0.58299905",
"0.5804953",
"0.5782922",
"0.5780116",
"0.5770324",
"0.57642686",
"0.5763325",
"0.5739157",
"0.57359266",
"0.57359266",
"0.57359266",
"0.57359266",
"0.57359266",
"0.57273567",
"0.571589",
"0.5711538",
"0.5699898"
]
| 0.70357424 | 0 |
Get the results associated to the scan ID. | def get_results(self, scan_id):
if not isinstance(scan_id, basestring):
raise TypeError("Expected string, got %r instead" % type(scan_id))
m_response = None
try:
m_response = self.__manager.make_xml_request('<get_results task_id="%s"/>' % scan_id, xml_result=True)
except ServerError, e:
raise VulnscanServerError("Can't get the results for the task %s. Error: %s" % (scan_id, e.message))
return self.transform(m_response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def results(self, scanid=None):\n params = {}\n if scanid is not None:\n params['scanId'] = scanid\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/results/', params)))",
"def full_results(self, scanid):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/fullResults/', {'scanId': scanid})))",
"def get_results(self, task_id=None):\n\n m_query = None\n if task_id:\n m_query = '<get_results task_id=\"%s\"/>' % scan_id\n else:\n m_query = '<get_results/>'\n\n return self.__manager.xml(m_query, xml_result=True)",
"def results(self, checkid):\r\n return results.Results(self, checkid)",
"def get_results(self):\n return self._do_action_under_lock(self._get_all_results)",
"def get_results(self):\n return self.results",
"def get_results(self):\n return self.results",
"def getResults():",
"def get_results(self):\n\n return self.results_",
"def find_scan_by_id(self, scan_id):\n return super().request('GET', f'/api/network/scans/{scan_id}')",
"def get_results(self):\n return self.result",
"def get_results(self, ids):\n self.join()\n return [self.results[id] for id in ids]",
"def GetResults(self):\n return self._results",
"def getResults(self):\n return self.Results",
"def getResults(self) -> Tuple[str, Results]:\n\n return self.moss_results",
"def results(self):\n if not self._results:\n self.read_results()\n return self._results",
"def query_results(self):\n return self.details[KEY_QUERY_RESULTS]",
"def results(self):\n return self._result_list",
"def results(self):\n\n return self._search_resut",
"async def get_results(self):\n try:\n return await self._get_gist_data(comm_type='results')\n except Exception:\n self.log.debug('Retrieving results over c2 (%s) failed!' % self.__class__.__name__)\n return []",
"def results(self):\r\n return self._results",
"def get_data(self):\n return self._results",
"def scan_result(self):\n assert 'masscan' in self._scan_result, 'Do a scan before trying to get result !'\n\n return self._scan_result",
"def results(self):\n return self._results",
"def results(self):\n return self._results",
"def results(self):\n return self._results",
"def scan_devices(self):\n self._update_info()\n\n return self.last_results",
"def get_results(self, job_id):\n ujs = self.__ujs_client()\n res = ujs.get_results(job_id)\n return res",
"def _get_query_results(self, query_result_id: str) -> Dict:\n url_inputs = {'redash_host': self.redash_host, 'query_result_id': query_result_id}\n results_url = REDASH_QUERY_RESULTS_ENDPOINT.format(**url_inputs)\n resp = r.get(results_url, headers=self.headers)\n return resp.json()",
"def results(self):\n\n return self._results"
]
| [
"0.81305647",
"0.76431435",
"0.7048965",
"0.6860931",
"0.66560125",
"0.6616452",
"0.6616452",
"0.6562809",
"0.6534687",
"0.6507671",
"0.64487296",
"0.6436412",
"0.6357509",
"0.6272296",
"0.6264815",
"0.6251121",
"0.62372833",
"0.6200259",
"0.61653924",
"0.61547935",
"0.6138275",
"0.61214954",
"0.609677",
"0.6084993",
"0.6084993",
"0.6084993",
"0.60776013",
"0.60553926",
"0.60264486",
"0.60200113"
]
| 0.80211973 | 1 |
Transform the XML results of OpenVAS into GoLismero structures. | def transform(xml_results):
PORT = re.compile("([\w\d\s]*)\(([\d]+)/([\w\W\d]+)\)")
m_return = []
m_return_append = m_return.append
# All the results
for l_results in xml_results.findall(".//results"):
for l_results in l_results.findall("result"):
l_partial_result = OpenVASResult.make_empty_object()
# Ignore log messages, only get the results
if l_results.find("threat").text == "Log":
continue
# For each result
for l_val in l_results.getchildren():
l_tag = l_val.tag
if l_tag in ("subnet", "host", "threat", "description"):
# All text vars can be processes both.
setattr(l_partial_result, l_tag, l_val.text)
elif l_tag == "port":
# Extract and filter port info
l_port = PORT.findall(l_val.text)
if l_port and len(l_port) > 0:
if len(l_port[0]) == 3:
l_s = l_port[0]
l_service = l_s[0]
l_port = int(l_s[1])
l_proto = l_s[2]
l_partial_result.port = OpenVASPort(l_service,
l_port,
l_proto)
elif l_tag == "nvt":
l_nvt_symbols = [x for x in dir(l_val) if not x.startswith("_")]
# The NVT Object
l_nvt_object = OpenVASNVT.make_empty_object()
for l_nvt in l_val.getchildren():
l_nvt_tag = l_nvt.tag
if l_nvt_tag in l_nvt_symbols:
setattr(l_nvt_object, l_nvt_tag, l_nvt.text)
# Add to the NVT Object
l_partial_result.nvt = l_nvt_object
else:
pass
# Add to the return values
m_return_append(l_partial_result)
return m_return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects",
"def complete_xml_parsing(self):\n for item in self.entities:\n item.severity = self.parsed_severity\n item.cwes.extend(self.parsed_cwes)\n item.advisory_id = self.parsed_advisory_id\n item.attack_vector = self.parsed_attack_vector\n if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):\n cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)\n if self.parsed_cvss_temporal != '' \\\n and is_correct_score(self.parsed_cvss_temporal):\n cvss_v3.temporal_sc = self.parsed_cvss_temporal\n item.cvss_v3 = cvss_v3\n item.cvss_base_sc_v3 = self.parsed_cvss_base\n item.cvss_temporal_score_v3 = self.parsed_cvss_temporal\n item.published = self.parsed_date",
"def __parse(self):\n\t\tparser=xml.sax.make_parser()\n\t\tparser.setContentHandler(OSMXMLFileParser(self))\n\t\tparser.parse(self.filename)\n\t\n\t\t# convert them back to lists\n\t\tself.nodes = self.nodes.values()\n\t\tself.ways = self.ways.values()\n\t\tself.relations = self.relations.values()",
"def parse_xml(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n #baseInfo['folder'] = tree.find('folder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n if obj.find('score') is None:\r\n obj_struct['score']=\"\"\r\n else:\r\n obj_struct['score'] = obj.find('score').text\r\n if obj.find('region') is None:\r\n obj_struct['region']=\"\"\r\n else:\r\n obj_struct['region'] = obj.find('region').text\r\n if obj.find('imageptr') is None:\r\n obj_struct['imageptr']=\"\"\r\n else:\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n # obj_struct['score'] = obj.find('score').text\r\n # obj_struct['region'] = obj.find('region').text\r\n # obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects",
"def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder",
"def process_cvat_xml(xml_file, image_dir, output_dir):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n #output_dir = os.path.join(output_dir, \"Annotations\")\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n ## occluded and pose are not tested within tracks\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified'\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n \n frame = frames.get( frameid, {} )\n \n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label,\n 'pose': pose, 'truncated': occluded }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n print( frameid )\n\n image_name = \"%s_%08d.jpg\" % (basename, frameid) ## KM: Revisit this for tracks. Hardcoded?\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n frame = frames[frameid]\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n occluded = box.get('occluded')\n pose = box.get('pose')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n writer.save(os.path.join(anno_dir, anno_name))\n\n else:\n for img_tag in cvat_xml.findall('image'):\n ## Discard path component; we expect user to provide path to images directory.\n ## It is probably easier for users to provide full path to images directory\n ## rather than having to figure out how much of the path is embedded in the XML\n ## as a relative or absolute path by CVAT.\n image_name = os.path.basename(img_tag.get('name'))\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified' ## Default if not found\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = output_dir #os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n #print(\"Writing {} (image: {})\".format(anno_name, image_name))\n writer.save(os.path.join(anno_dir, anno_name))",
"def parsexml(self):\n raise NotImplementedError",
"def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n result.append({})\n\n for prop in self.pattern.findall(msg):\n key = prop.split(\"=\")[0]\n value = prop.split('\"')[1]\n\n if key == \"CM\":\n try:\n value = float(value)\n except:\n pass\n if key == \"CLASSID\":\n try:\n value = int(value)\n except:\n pass\n result[-1][key] = value\n\n return result",
"def process_cvat_xml(xml_file, image_dir, output_dir,username,password,ilabels):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n\n if (image_dir is None):\n image_dir=os.path.join(output_dir,\"data/obj\")\n os.makedirs(image_dir, exist_ok=True)\n\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n current_labels = {}\n traintxt = \"\"\n auto_lbl_count = 0\n\n if (ilabels is not None):\n vlabels=ilabels.split(',')\n for _label in vlabels:\n current_labels[_label]=auto_lbl_count\n auto_lbl_count+=1\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n #occluded = int(box.get('occluded')) #currently unused\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n\n frame = frames.get( frameid, {} )\n\n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n taskid = int(cvat_xml.find('.//task/id').text)\n\n urlsegment = cvat_xml.find(\".//segments/segment/url\").text\n urlbase = urlsegment.split(\"?\")[0]\n\n httpclient = requests.session()\n httpclient.get(urlbase)\n\n csrftoken = \"none\"\n sessionid = \"none\"\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n image_name = \"%s_%08d.jpg\" % (basename, frameid)\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n if username is None:\n log.warn('{} image cannot be found. Is `{}` image directory correct?\\n'.format(image_path, image_dir))\n else:\n log.info('{} image cannot be found. 
Downloading from task ID {}\\n'.format(image_path, taskid))\n\n if sessionid == \"none\":\n if \"csrftoken\" in httpclient.cookies:\n csrftoken = httpclient.cookies[\"csrftoken\"]\n elif \"csrf\" in httpclient.cookies:\n csrftoken = httpclient.cookies[\"csrf\"]\n\n login_data = dict(username=username, password=password,\n csrfmiddlewaretoken=csrftoken, next='/dashboard')\n\n urllogin = urlbase+\"/auth/login\"\n httpclient.post(urllogin, data=login_data,\n headers=dict(Referer=urllogin))\n\n if (\"sessionid\" in httpclient.cookies):\n sessionid = httpclient.cookies[\"sessionid\"]\n\n url = urlbase+\"/api/v1/tasks/\"+str(taskid)+\"/frames/\"+ str(frameid)\n\n req = httpclient.get(url, headers=dict(\n csrftoken=csrftoken, sessionid=sessionid))\n\n with open(image_path, 'wb') as fo:\n fo.write(req.content)\n print('Url saved as %s\\n' % image_path)\n\n\n frame = frames[frameid]\n\n _yoloAnnotationContent=\"\"\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n if not label in current_labels:\n current_labels[label] = auto_lbl_count\n auto_lbl_count+=1\n\n labelid=current_labels[label]\n yolo_x= (xmin + ((xmax-xmin)/2))/width\n yolo_y= (ymin + ((ymax-ymin)/2))/height\n yolo_w = (xmax - xmin) / width\n yolo_h = (ymax - ymin) / height\n\n if len(_yoloAnnotationContent) != 0:\n _yoloAnnotationContent += \"\\n\"\n\n _yoloAnnotationContent+=str(labelid)+\" \"+\"{:.6f}\".format(yolo_x) +\" \"+\"{:.6f}\".format(yolo_y) +\" \"+\"{:.6f}\".format(yolo_w) +\" \"+\"{:.6f}\".format(yolo_h)\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.txt')\n anno_path = os.path.join(image_dir, anno_name)\n\n _yoloFile = open(anno_path, \"w\", newline=\"\\n\")\n _yoloFile.write(_yoloAnnotationContent)\n _yoloFile.close()\n\n if len(traintxt)!=0:\n traintxt+=\"\\n\"\n\n traintxt+=image_path\n\n else:\n for img_tag in cvat_xml.findall('image'):\n image_name = img_tag.get('name')\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n _yoloAnnotationContent = \"\"\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n if not label in current_labels:\n current_labels[label] = auto_lbl_count\n auto_lbl_count += 1\n\n labelid = current_labels[label]\n yolo_x = (xmin + ((xmax-xmin)/2))/width\n yolo_y = (ymin + ((ymax-ymin)/2))/height\n yolo_w = (xmax - xmin) / width\n yolo_h = (ymax - ymin) / height\n\n if len(_yoloAnnotationContent) != 0:\n _yoloAnnotationContent += \"\\n\"\n\n _yoloAnnotationContent += str(labelid)+\" \"+\"{:.6f}\".format(yolo_x) + \" \"+\"{:.6f}\".format(\n yolo_y) + \" \"+\"{:.6f}\".format(yolo_w) + \" \"+\"{:.6f}\".format(yolo_h)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.txt')\n anno_path = os.path.join(image_dir, anno_name)\n\n _yoloFile = open(anno_path, \"w\", newline=\"\\n\")\n _yoloFile.write(_yoloAnnotationContent)\n _yoloFile.close()\n\n traintxt_file=open(output_dir+\"/train.txt\",\"w\",newline=\"\\n\")\n traintxt_file.write(traintxt)\n traintxt_file.close()",
"def convert(tree,fileName=None):\n rootNode = tree.getroot()\n if rootNode.tag not in ['Simulation', 'OutStreamManager', 'Steps']:\n ## This is not a valid input file, or at least not one we care about for\n ## this conversion\n return tree\n osmNode = None\n stepsNode = None\n if rootNode.tag == 'Simulation':\n osmNode = rootNode.find('OutStreamManager')\n stepsNode = rootNode.find('Steps')\n elif rootNode.tag == 'outstreamManager':\n ## Case for when the OutStreamManager node is specified in an external file.\n ## (Steps should not be in this file?)\n osmNode = rootNode\n elif rootNode.tag == 'Steps':\n ## Case for when the Steps node is specified in an external file.\n ## (OutStreamManager should not be in this file?)\n stepsNode = rootNode\n\n if osmNode is not None:\n osmNode.tag = 'OutStreams'\n\n if stepsNode is not None:\n for outputNode in stepsNode.iter('Output'):\n if 'class' in outputNode.attrib and outputNode.attrib['class'] == 'OutStreamManager':\n outputNode.attrib['class'] = 'OutStreams'\n\n return tree",
"def site2nrml(model, params_dict): \n \"\"\"\n # Some XML definitions\n NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'\n GML_NAMESPACE = 'http://www.opengis.net/gml'\n SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE} \n gml_ns = SERIALIZE_NS_MAP['gml']\n \"\"\"\n \n # Head matter \n root = etree.Element(_tag='nrml', nsmap={'gml': 'http://www.opengis.net/gml'})\n root.set('xmlns', 'http://openquake.org/xmlns/nrml/0.4')\n root.append(etree.Comment('%s' % '%s site model' %(model)))\n \n\n # Define Site Model Name \n sMod = etree.SubElement(root, \"siteModel\")\n sMod.set('name', model + ' Site Model')\n \n # Define sub element\n \n for key in params_dict:\n \n site = etree.SubElement(sMod, \"site\")\n site.set('lon', '%s' % key[0])\n site.set('lat', '%s' % key[1])\n site.set('vs30', '%s' % params_dict[key][0])\n site.set('vs30Type', '%s' % 'inferred')\n site.set('z1pt0', '%s' % '%3.3f' % float(params_dict[key][1]))\n site.set('z2pt5', '%s' % '%3.3f' % float(params_dict[key][2]))\n \n #print(getMinMax(params_dict))\n \n # Form tree and write to xml\n root_tree = etree.ElementTree(root)\n outFile = open((out_directory + '/' + out_filename), 'wb')\n root_tree.write(outFile, encoding=\"utf-8\", xml_declaration=True, pretty_print=True)",
"def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()",
"def parse(self):",
"def test_parse_pi_xml_02(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)",
"def format_xml(self,query_results):\n results=query_results.data\n factory=factory_xml()\n dump=factory.dumps({'data':results})\n print(dump)\n # TODO return output for this\n return \"\"",
"def parse(self):\n \n root = self.xml_tree.getroot()\n \n #run for creating tables\n for child in root[1]:\n if child.attrib['type'] == 'Database - Table':\n self.add_table(child)\n \n \n #if table_dict empty -> wrong type of dia diagram\n if self.table_dict == {}: ###\n self.err.print_error(\"parser:database_wrong_dia\") ###\n e_code = self.err.exit_code[\"parser\"] ###\n ###\n exit(e_code) ###\n \n \n #run for adding references\n for child in root[1]:\n if child.attrib['type'] == 'Database - Reference':\n self.add_reference(child)\n \n return",
"def convert(tree,fileName=None):\n simulation = tree.getroot()\n models = simulation.find('Models')\n if models is None: return tree # no models, no BasicStats\n for model in models:\n if model.tag == 'PostProcessor' and model.attrib['subType'] == 'BasicStatistics':\n #note that this converts exactly, it asks for everything with respect to everything\n if model.find('what') is None:\n #fix one botched attempt\n if model.find('all') is not None:\n anode = model.find('all')\n if anode.find('targets') is None:\n params = anode.text\n anode.text = ''\n targetNode = ET.Element('targets')\n targetNode.text = params\n featureNode = ET.Element('features')\n featureNode.text = params\n anode.append(targetNode)\n anode.append(featureNode)\n #already converted\n return tree\n #get the metrics\n what = model.find('what').text.strip()\n model.remove(model.find('what'))\n #get the parameters\n params = model.find('parameters').text.strip()\n model.remove(model.find('parameters'))\n #targets and features\n targetNode = ET.Element('targets')\n targetNode.text = params\n featureNode = ET.Element('features')\n featureNode.text = params\n #parameters\n if 'all' in what:\n allNode = ET.Element('all')\n allNode.append(targetNode)\n allNode.append(featureNode)\n model.append(allNode)\n else:\n needsFeatures = ['sensitivity','covariance','pearson','NormalizedSensitivity','VarianceDependentSensitivity']\n for w in (i.strip() for i in what.split(',')):\n node = ET.Element(w)\n if w in needsFeatures:\n node.append(targetNode)\n node.append(featureNode)\n else:\n node.text = params\n model.append(node)\n return tree",
"def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if 
hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n self.rad_instrument = root.ROW.RAD_INSTRUMENT",
"def convert(self):\n coverage_data = self.parse()\n return self.generate_cobertura_xml(coverage_data)",
"def _build(self):\n xml = ET.parse(self.fn)\n root = xml.getroot()\n\n metadata = None\n trk = None\n self.prefix = root.tag[:-3]\n metadata = root.find(self._get_tag('metadata'))\n # print(metadata.find(self._get_tag('time')))\n trk = root.find(self._get_tag('trk'))\n\n trkseg = trk.find(self._get_tag('trkseg'))\n\n # I just wanted to flatten the track point and get the\n # fields that I am actually interested in.\n def walker(node):\n nonlocal data\n tags = {'lat': float,\n 'lon': float,\n 'ele': float,\n 'time': cvt_time,\n 'temp': float,\n 'hr': float}\n for tag in tags:\n if node.tag.find(tag) >= 0:\n data[tag] = tags[tag](node.text)\n for child in node:\n walker(child)\n\n for trkpt in trkseg.findall(self._get_tag('trkpt')):\n data = {}\n data['lat'] = trkpt.attrib['lat']\n data['lon'] = trkpt.attrib['lon']\n walker(trkpt)\n self.points.append(TrackPoint(**data))",
"def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" 
<parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin 
de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += 
spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn",
"def xml_to_otr(xml_file, destination, speakers=False):\n\n tree = etree.parse(xml_file)\n root = tree.getroot()\n\n # Check xml version and force encoding for compatibility purpose\n if(len(root.findall(\".//word[@value]\")) > 0):\n version = 'v2'\n encoding = 'utf-8'\n word_selector = 'value'\n spk_selector = 'speaker'\n gender_selector = 'gender'\n elif(len(root.findall(\".//word[@sel]\")) > 0):\n version = 'v1'\n encoding = 'iso-8859-1'\n word_selector = 'sel'\n spk_selector = 'locuteur'\n gender_selector = 'sexe'\n else:\n return []\n\n previous_speaker = None\n previous_gender = None\n speaker_turns = []\n speaker_sentences = []\n\n output = codecs.open(destination, 'w', encoding = 'utf-8') if destination else sys.stdout\n\n try:\n\n json_output = {}\n\n html = \"\";\n for i, sentence in enumerate(root):\n if(i != 0):\n html += \"</p>\";\n\n html += \"<p>\";\n\n speaker = sentence.attrib[spk_selector]\n words = []\n\n if speakers:\n # Speaker change\n if(speaker != previous_speaker):\n html += \"<p><strong>Locuteur {} :</strong> \".format(speaker);\n else:\n html += \"<p>\";\n else:\n html += \"<p>\";\n\n nb_words = len(sentence)\n for j, word in enumerate(sentence):\n value = word.attrib[word_selector]\n space_after = \" \"\n if(value.endswith(\"'\") or \\\n (j+1 < nb_words and sentence[j+1].attrib[word_selector].startswith('-'))):\n space_after=\"\"\n\n html += \"<span class=\\\"word\\\" data-start=\\\"{start}\\\">{value}{space}</span>\".format(start=word.attrib['start'], value=value, space=space_after)\n\n previous_speaker = speaker\n previous_gender = sentence.attrib[gender_selector]\n\n\n html += \"</p>\"\n #import pprint; pprint.pprint(html)\n\n json_output['text'] = html\n json_output['media_source'] = \"\"\n print(json.dumps(json_output), file=output)\n\n\n finally:\n if output is not sys.stdout:\n output.close()",
"def xmlify(content):\n\n env = Environment(\n loader=PackageLoader('falcon', 'templates'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n #template = env.get_template('geste.xml')\n template = env.get_template('geste_with_sents.xml')\n\n documents = {}\n\n for wit in content:\n #tokens = [t for sent in content[wit] for t in sent]\n #sentences = [sent for sent in content[wit]]\n sentences = content[wit] #TODO: fix the getting of sentences\n # if format == \"tei-geste\": Right now only 1 format\n #documents[wit] = template.render(tokens=tokens)\n documents[wit] = template.render(sentences=sentences)\n\n return documents",
"def test_parse_pi_xml_08(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)",
"def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()",
"def convert(self, sm):\n return self.visit(sm)",
"def buildxml2(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml2()\")\n # on master, control network bridge added earlier in startup()\n ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False)\n self.buildplatformxml2(ctrlnet)\n self.buildnemxml()\n self.buildeventservicexml()",
"def process_document(as_xml, as_csv):\n tree = ET.parse(as_xml)\n lists = xml_to_list(tree.getroot())\n list_to_csv(lists, as_csv)\n # raise NotImplementedError()\n pass",
"def __call__(self, scenario):\n self.osm = OSM()\n self.lanelet_network = scenario.lanelet_network\n self.first_nodes = dict() # saves first left and right node\n self.last_nodes = dict() # saves last left and right node\n self.left_ways = dict()\n self.right_ways = dict()\n for lanelet in scenario.lanelet_network.lanelets:\n self._convert_lanelet(lanelet)\n\n return self.osm.serialize_to_xml()",
"def _sparv_get_analysis(self) -> ET.Element:\n settings: str = json.dumps(\n {\n \"corpus\": \"untitled\",\n \"lang\": \"sv\",\n \"textmode\": \"xml\",\n \"word_segmenter\": \"default_tokenizer\",\n \"sentence_segmentation\": {\n \"tag\": \"sentence\",\n \"attributes\": [\"original\"],\n },\n \"paragraph_segmentation\": {\"tag\": \"paragraph\", \"attributes\": [\"name\"]},\n \"root\": {\"tag\": \"text\", \"attributes\": [\"title\"]},\n \"extra_tags\": [],\n \"positional_attributes\": {\n \"lexical_attributes\": [\"pos\", \"msd\", \"lemma\", \"lex\", \"sense\"],\n \"compound_attributes\": [\"complemgram\", \"compwf\"],\n \"dependency_attributes\": [\"ref\", \"dephead\", \"deprel\"],\n \"sentiment\": [\"sentiment\", \"sentimentclass\"],\n },\n \"named_entity_recognition\": [\"ex\", \"type\", \"subtype\"],\n \"text_attributes\": {\"readability_metrics\": [\"lix\", \"ovix\", \"nk\"]},\n }\n )\n response: requests.models.Response = requests.get(\n \"https://ws.spraakbanken.gu.se/ws/sparv/v2/\",\n data={\n \"text\": self._sparv_convert_document_to_xml(),\n \"mode\": \"xml\",\n \"settings\": settings,\n },\n )\n if response.status_code == 200:\n sparv_data: str = response.text.strip()\n return ET.fromstring(sparv_data)\n\n raise Exception(f\"Sparv returned unexpected code: {response.status_code}\")"
]
| [
"0.5746617",
"0.57251436",
"0.5701722",
"0.55126",
"0.5400629",
"0.52202314",
"0.51965374",
"0.5187935",
"0.5169699",
"0.5107423",
"0.5096457",
"0.50949335",
"0.5082679",
"0.50738865",
"0.50707245",
"0.5036144",
"0.5033714",
"0.50212145",
"0.5018997",
"0.50144964",
"0.5011265",
"0.500156",
"0.4990221",
"0.4946466",
"0.49430832",
"0.4936324",
"0.49344876",
"0.49277863",
"0.4925836",
"0.49242565"
]
| 0.64514476 | 0 |
Low-level interface to send OMP XML to the manager. `xmldata` may be either a UTF-8 encoded string or an etree Element. If `xml_result` is true, the result is returned as an etree Element; otherwise a UTF-8 encoded string is returned. | def make_xml_request(self, xmldata, xml_result=False):
if xml_result:
return self._xml_command(xmldata)
else:
return self._text_command(xmldata) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_xml(data, code, headers=None):\r\n resp = make_response(dumps({'response' :data}), code)\r\n resp.headers.extend(headers or {})\r\n return resp",
"def output_xml(data,code,headers=None):\r\n resp = make_response(dumps({'response': data}), code)\r\n resp.headers.extend(headers or {})\r\n return resp",
"def output_xml(data, code, headers=None):\n response = make_response(simplexml.dumps({'response': data}), code)\n response.headers.extend(headers or {})\n return response",
"def output_xml(data, code, headers=None):\n resp = make_response(simplexml_dumps({\"response\": data}), code)\n resp.headers.extend(headers or {})\n return resp",
"def _send(self, data):\n\n # Make sure the data is a string.\n if etree.iselement(data):\n data = etree.dump(data)\n if isinstance(data, unicode):\n data = data.encode('utf-8')\n\n # Synchronize access to the socket.\n with self.__socket_lock:\n\n # Send the data to the server.\n self.socket.sendall(data)\n\n # Get the response from the server.\n data = \"\"\n tree = None\n while True:\n chunk = self.socket.recv(1024)\n if not chunk:\n break\n data += chunk\n try:\n tree = etree.fromstring(data)\n except Exception:\n continue\n break\n if tree is None:\n tree = etree.fromstring(data)\n\n # Return the parsed response.\n return tree",
"def to_xml(self, data, options=None):\r\n options = options or {}\r\n\r\n if lxml is None:\r\n raise UnsupportedSerializationFormat(\"Usage of the XML aspects requires lxml.\")\r\n\r\n return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')",
"def comando_xmls(self):\r\n status = self.retorna_xmls(args.data, args.status, args.local)\r\n return",
"def xml():\n try:\n return Response(render_template(\n 'lti.xml.j2'), mimetype='application/xml'\n )\n except:\n app.logger.error(\"Error with XML.\")\n return return_error('''Error with XML. Please refresh and try again. If this error persists,\n please contact support.''')",
"def serialize_result(result: Any) -> Union[str, bytes]:\n if isinstance(result, Node):\n return result.serialize(how='default' if RESULT_FILE_EXTENSION != '.xml' else 'xml')\n else:\n return repr(result)",
"def run(self, xml, **kwargs):\n kwargs['output'] = self.__graph__()\n if isinstance(xml, str):\n try:\n self.source = etree.XML(xml)\n except ValueError:\n try:\n self.source = etree.XML(xml.encode())\n except:\n raise ValueError(\"Cannot run error {}\".format(sys.exc_info()[0]))\n else:\n self.source = xml\n super(XMLProcessor, self).run(**kwargs)\n self.output = kwargs['output']\n return kwargs['output']",
"def as_xml(data: str) -> etree.Element:\n try:\n return etree.fromstring(data)\n except etree.ParseError as error:\n raise ValueError(\n f\"Tried to parse malformed data as XML. Error: {error}, Got data: '{data}'\"\n ) from error",
"def adaptXmlToXml(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptXmlToXml(self, *args)",
"def xml_string(self):\r\n if self._xml_string is not None:\r\n return self._xml_string\r\n\r\n return etree.tostring(self._xml_node)",
"def _xml_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response",
"def xml_string(self):\n if self._xml_string is not None:\n return self._xml_string\n\n return etree.tostring(self._xml_node)",
"def parse_xml_str(self, data):\n try:\n dom = parseString(data)\n process_includes(dom)\n except ExpatError, x:\n raise EzXMLError(\"Failed to parse: %s\" % x)\n return self.walkdom(dom.documentElement)",
"def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()",
"def display_xml(data: XMLLike) -> str:\n\n xmlstring = etree.tostring(data, encoding='unicode', pretty_print=True)\n return highlight(xmlstring, XmlLexer(), HtmlFormatter(noclasses=True, nobackground=False))",
"def xml(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n return Text(string, token).xml",
"def xml_tostring(tree):\n if six.PY2:\n return ET.tostring(tree)\n else:\n return ET.tostring(tree, encoding='unicode')",
"def parsexml(self):\n raise NotImplementedError",
"def xml(self):\n raise NotImplementedError('This api does not return xml')",
"def data_to_xml(data, xml=None):\n\n for element in data:\n name = element[0]\n val = element[1]\n if len(element) > 2:\n converter = element[2]\n else:\n converter = None\n\n if val is not None:\n if converter is not None:\n text = _str(converter(_str(val)))\n else:\n text = _str(val)\n\n entry = ET.Element(name)\n entry.text = text\n if xml is not None:\n xml.append(entry)\n else:\n return entry\n return xml",
"def buildtransportxml(self):\n try:\n subprocess.check_call([\"emanegentransportxml\", \"platform.xml\"], cwd=self.session.session_dir)\n except subprocess.CalledProcessError:\n logger.exception(\"error running emanegentransportxml\")",
"def render(self, data, accepted_media_type=None, renderer_context=None):\n if data is None:\n return ''\n\n stream = StringIO()\n\n xml = SimplerXMLGenerator(stream, self.charset)\n xml.startDocument()\n #xml.startElement(\"root\", {})\n\n self._to_xml(xml, data)\n\n #xml.endElement(\"root\")\n xml.endDocument()\n return stream.getvalue()",
"def saveXml(self, buf: java.lang.StringBuilder) -> None:\n ...",
"def _run_test_and_get_xml(self, flag):\n\n xml_fhandle, xml_fname = tempfile.mkstemp()\n os.close(xml_fhandle)\n\n try:\n binary = self._get_helper()\n args = [binary, flag, '--xml_output_file=%s' % xml_fname]\n ret = subprocess.call(args)\n self.assertEqual(ret, 0)\n\n xml = ElementTree.parse(xml_fname).getroot()\n finally:\n os.remove(xml_fname)\n\n return xml",
"def xmlrpc_marshal(data):\n if isinstance(data, xmlrpclib.Fault):\n return xmlrpclib.dumps(data)\n else:\n return xmlrpclib.dumps((data,), methodresponse=True)",
"def test_xml_nodes(self):\n try:\n import xmltodict\n except ImportError:\n raise unittest.SkipTest(\"Missing dependency xmltodict.\")\n\n n1 = nodes.XMLToPython()\n n2 = nodes.PythonToXML()\n\n channel = FakeChannel(self.loop)\n\n n1.channel = channel\n n2.channel = channel\n\n m = generate_msg()\n\n m.payload = '<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n<test>hello</test>'\n\n base = str(m.payload)\n\n ret = self.loop.run_until_complete(n1.handle(m))\n ext_new = self.loop.run_until_complete(n2.handle(ret))\n # Check return\n self.assertTrue(isinstance(ret, message.Message))\n self.assertEqual(base, ext_new.payload, \"XML nodes not working !\")",
"def xmlrpc_response(data):\n xml = xmlrpc_marshal(data)\n response = webob.Response(xml)\n response.content_type = 'text/xml'\n response.content_length = len(xml)\n return response"
]
| [
"0.59842473",
"0.590035",
"0.58694977",
"0.58375615",
"0.5746175",
"0.56521654",
"0.55872303",
"0.5380795",
"0.52754253",
"0.52619267",
"0.51760083",
"0.5154628",
"0.5122051",
"0.5100714",
"0.5063454",
"0.5046226",
"0.5041392",
"0.50323397",
"0.50272864",
"0.498374",
"0.49792615",
"0.49792132",
"0.49709037",
"0.49399117",
"0.491631",
"0.48834038",
"0.4823373",
"0.4814221",
"0.47976992",
"0.47761545"
]
| 0.7286837 | 0 |
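A minimal usage sketch for the `make_xml_request` record above. Everything here is illustrative: the `manager` object stands in for a connected OMP client exposing that method, and the `<get_version/>` command and the shape of the reply are assumptions, not part of the record.

```python
# Hypothetical usage sketch -- `manager` is assumed to be a connected OMP client
# exposing make_xml_request() as defined in the record above.
def show_version(manager):
    # Ask for the raw reply text (xml_result=False returns a string).
    text = manager.make_xml_request("<get_version/>", xml_result=False)
    print(text)

    # Ask for a parsed reply instead (xml_result=True returns an etree Element).
    tree = manager.make_xml_request("<get_version/>", xml_result=True)
    print(tree.tag, tree.attrib)
```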
Delete a target on the OpenVAS server. | def delete_target(self, target_id):
request = """<delete_target target_id="%s" />""" % (target_id)
self.make_xml_request(request, xml_result=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DeleteTarget(self, target_instance_id):",
"def delete_target(self, target_id):\n self.__manager.delete_target(target_id)",
"def delete_target(\n self,\n ) -> Callable[[cloud_deploy.DeleteTargetRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_target\" not in self._stubs:\n self._stubs[\"delete_target\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/DeleteTarget\",\n request_serializer=cloud_deploy.DeleteTargetRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"delete_target\"]",
"def target_delete(obj, product_name, target_uri):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n target = client.target_get(target_uri)\n\n if product['name'] != target['product_name']:\n fatal_error('Cannot delete Target {} as it does not belong to product {}'.format(target_uri, product_name))\n\n with Action('Deleting Target: {}'.format(target_uri), nl=True):\n client.target_delete(target)",
"def delete(self, host, file):",
"def delete_server(ServerName=None):\n pass",
"def delete():",
"def delete(self):\n raise NotImplementedError(\"Deleting not supported for servers\")",
"def target_remove():\r\n try:\r\n target_id = request.post_vars[\"target\"]\r\n group_id = request.post_vars[\"group\"]\r\n except KeyError:\r\n pass\r\n else:\r\n result = gl.remove_from_targetgroup(target_id, group_id)\r\n if result:\r\n return response.json({'success': 'true'})\r\n return response.json({'success': 'false'})",
"def clear_target(remote):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ClearTarget();\n remote.runCommand(cmd)",
"def delete(self, db: Session) -> Optional[FidesopsBase]:\n _ = [target.delete(db=db) for target in self.targets]\n return super().delete(db=db)",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def delete(self):\n key = f'https://plex.tv/devices/{self.id}.xml'\n self._server.query(key, self._server._session.delete)",
"def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass",
"def delete_mirroring_session(self, port, target, mode):\n pass",
"def delete(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack server: %s' % truncate(res))\n return res[0]",
"def delete(self, hostname):\n self.not_supported()",
"def test_debugger_delete_invalid_target(self):\n target = lldb.SBTarget()\n self.assertFalse(target.IsValid())\n self.dbg.DeleteTarget(target)",
"def delete(self, remote):\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"DELETE\",\n file_path = remote)",
"def delete_from_provider(self, builder, provider, credentials, target, parameters):",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"async def delete(self, delete: TPayload) -> None:",
"def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass",
"def wipe():\n\tdb.session.query(Target).delete()\n\tdb.session.commit()",
"def delete(self):\n self.call('DELETE', expect=error.NO_CONTENT)",
"def delete_compute_target_by_name(ws, name):\n ws.compute_targets[name].delete()",
"def delete(self):\n ...",
"async def _remove_heist(self, ctx, *, target: str):\r\n author = ctx.message.author\r\n guild = ctx.guild\r\n targets = await self.thief.get_guild_targets(guild)\r\n if string.capwords(target) in targets:\r\n await ctx.send(\"Are you sure you want to remove {} from the list of \"\r\n \"targets?\".format(string.capwords(target)))\r\n response = await self.bot.wait_for('MESSAGE', timeout=15, check=lambda x: x.author == author)\r\n if response is None:\r\n msg = \"Canceling removal. You took too long.\"\r\n elif response.content.title() == \"Yes\":\r\n targets.pop(string.capwords(target))\r\n await self.thief.save_targets(guild, targets)\r\n msg = \"{} was removed from the list of targets.\".format(string.capwords(target))\r\n else:\r\n msg = \"Canceling target removal.\"\r\n else:\r\n msg = \"That target does not exist.\"\r\n await ctx.send(msg)",
"def test_delete_remote(self):\n # Create source site and remote project\n source_site = self.make_site(\n name=REMOTE_SITE_NAME,\n url=REMOTE_SITE_URL,\n mode=SITE_MODE_SOURCE,\n description=REMOTE_SITE_DESC,\n secret=REMOTE_SITE_SECRET,\n )\n self.make_remote_project(\n project_uuid=self.project.sodar_uuid,\n project=self.project,\n site=source_site,\n level=SODAR_CONSTANTS['REMOTE_LEVEL_READ_ROLES'],\n )\n self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 3)",
"def delete(ctx):\n click.echo('deleting')\n ctx.delete()\n click.echo('done')"
]
| [
"0.8091861",
"0.7256433",
"0.6986218",
"0.6948263",
"0.6703882",
"0.6629129",
"0.6562303",
"0.64718026",
"0.6440984",
"0.6424786",
"0.636235",
"0.63571507",
"0.6321625",
"0.63056296",
"0.62310606",
"0.6209659",
"0.61825114",
"0.60985464",
"0.60440814",
"0.60434717",
"0.60291183",
"0.60288113",
"0.60260576",
"0.60083276",
"0.59896994",
"0.59889704",
"0.59744245",
"0.59633577",
"0.59481394",
"0.5936508"
]
| 0.73861814 | 1 |
Get the IDs of the server's tasks. If the name parameter is provided, only return the ID associated with that name. | def get_tasks_ids(self, name=None):
m_return = {}
for x in self.get_tasks().findall("task"):
m_return[x.find("name").text] = x.get("id")
if name:
return {name : m_return[name]}
else:
return m_return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTaskIdsFromName(tasks_name):\n ids = []\n for name in tasks_name:\n task_obj = Tafv2Task.objects.get(script=name)\n ids.append(task_obj.id)\n\n return ids",
"def list(self, name=None):\n if name is not None:\n tasks = self._list_all_tasks_from_single_dataset(name)\n else:\n tasks = self._list_all_tasks_from_all_datasets()\n return tasks",
"def get_tasks_by_name(self, name: str) -> Set[\"Task\"]: # noqa: F821\n find = set()\n for task in self.tasks.values():\n if task.name == name:\n find.add(task)\n return find",
"def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()",
"def taskdetail_get_ids_names():\n return IMPL.taskdetail_get_ids_names()",
"def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)",
"def get(self):\n\n return task_service.get_tasks()",
"def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)",
"def get_tasks(self):\n return self.tasks",
"def get_tasks(self):\n return self.stn.get_tasks()",
"def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)",
"def get_tasks_list(self):\n return self.task_controller.get_list()",
"def get_shadow_scheduler_tasks(self, name):\n # First look for an index.\n url = SHADOW_SCHEDULER_ARTIFACT_URL.format(rev=self.rev, name=name)\n r = requests.get(url)\n\n if r.status_code != 200:\n if name not in self._shadow_scheduler_artifacts:\n return None\n r = requests.get(self._shadow_scheduler_artifacts[name])\n\n tasks = r.text\n return set(tasks.splitlines())",
"def get_tasks(self):\n return self.tasks.all()",
"def get_tasks_ids_by_status(self, status=\"Done\"):\n if status not in (\"Done\", \"Paused\", \"Running\", \"Stopped\"):\n raise ValueError(\"Requested status are not allowed\")\n\n\n m_task_ids = {}\n\n for x in self.get_tasks().findall(\"task\"):\n if x.find(\"status\").text == status:\n m_task_ids[x.find(\"name\").text] = x.attrib[\"id\"]\n\n return m_task_ids",
"def task_template_ids(self):\n return self._get('task_templates')",
"def get_all_tasks(self):\n \n sql = \"select * from tasks;\"\n return self._query_all(sql)",
"def get_ids(self) -> List[str]:",
"async def list_tasks():",
"def _get_ids_from_name_private(self, name):\r\n results = self.list_private_images(name=name)\r\n return [result['id'] for result in results]",
"def get_tasks(self):\n return self.task_collection",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def get_subtasks(self, tid):\n return self.task_controller.get_subtasks(tid)",
"def _get_ids_from_name_public(self, name):\r\n results = self.list_public_images(name=name)\r\n return [result['id'] for result in results]",
"def get_id_by_name(self, names):\n result = []\n name_field = 'name'\n synonym_field = 'synonyms'\n pos_0 = {name_field: {'$in': names}}\n pos_1 = {synonym_field: {'$in': names}}\n query = {'$or': [pos_0, pos_1]}\n projection = {'_id': 1}\n docs = self.collection.find(filter=query, projection=projection, collation=self.collation)\n for doc in docs:\n result.append(doc['_id'])\n return result",
"def get_task_list(self):\n raise NotImplementedError()",
"def get_tasks(self):\n if self.tasks_url:\n resp = self._api.list_tasks(url=self.tasks_url)\n\n else:\n resp = self._api.list_tasks(job_id=self.id)\n\n if resp.success:\n self.tasks = [Task(self._api, self.id, **task_def)\n for task_def in resp.result]\n\n return self.tasks\n\n else:\n raise resp.result",
"def _get_server_ids(self, unit_name):\n missing_unit_err = (\"Failed to find Server IDs. Unit {} is already\"\n \" missing from the {} cluster status\")\n sb_status, nb_status = self._cluster_status_action()\n\n for unit, server_id in sb_status[\"unit_map\"].items():\n if unit_name == unit:\n sb_id = server_id\n break\n else:\n self.fail(missing_unit_err.format(unit_name, \"Southbound\"))\n\n for unit, server_id in nb_status[\"unit_map\"].items():\n if unit_name == unit:\n nb_id = server_id\n break\n else:\n self.fail(missing_unit_err.format(unit_name, \"Northbound\"))\n\n return sb_id, nb_id",
"def get_worker_id_list(self):\r\n return self._workers_id",
"def get_server_job_ids(self):\n self.server_job_ids = list()\n for server in self.servers:\n if server != 'local':\n with SSHClient(server) as ssh:\n self.server_job_ids.extend(ssh.check_running_jobs_ids())\n else:\n self.server_job_ids.extend(check_running_jobs_ids())"
]
| [
"0.7241726",
"0.66047156",
"0.61854565",
"0.617998",
"0.6130526",
"0.6116685",
"0.59722537",
"0.5819954",
"0.581914",
"0.5793457",
"0.572261",
"0.5721991",
"0.5720298",
"0.5713903",
"0.5683129",
"0.5647397",
"0.5627572",
"0.5593505",
"0.5576658",
"0.5566203",
"0.5542736",
"0.5528111",
"0.5519115",
"0.5509228",
"0.5500368",
"0.5467041",
"0.5448062",
"0.54190856",
"0.5416938",
"0.54156226"
]
| 0.7461567 | 0 |
Get the IDs of the server's tasks filtered by their status. | def get_tasks_ids_by_status(self, status="Done"):
if status not in ("Done", "Paused", "Running", "Stopped"):
raise ValueError("Requested status are not allowed")
m_task_ids = {}
for x in self.get_tasks().findall("task"):
if x.find("status").text == status:
m_task_ids[x.find("name").text] = x.attrib["id"]
return m_task_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_status(self, ids):\n return [self.tasks[id].status for id in ids]",
"async def get_task_status(task_id: TaskId):",
"def get_by_status(status):\n return list(tasks.find({'status': status}))",
"def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)",
"def get_tasks(taskid_list, module):\n tasks = module.client.api.get_tasks_by_status('Pending')\n task_list = list()\n for task in tasks:\n if task['workOrderId'] in taskid_list:\n task_list.append(task)\n return task_list",
"def get_server_job_ids(self):\n self.server_job_ids = list()\n for server in self.servers:\n if server != 'local':\n with SSHClient(server) as ssh:\n self.server_job_ids.extend(ssh.check_running_jobs_ids())\n else:\n self.server_job_ids.extend(check_running_jobs_ids())",
"def taskbystatus(self, **kwargs):\n rows = self.api.query(None, None, self.Task.TaskByStatus_sql, username_=kwargs[\"username\"], taskstatus=kwargs[\"taskstatus\"])\n\n return rows",
"def select_task_by_status(conn, status):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM nodes WHERE status=?\", (status,))\n \n rows = cur.fetchall()\n return rows",
"def tasks_by_status(self) -> Dict[TaskStatus, Sequence[str]]:\n out = {}\n for key in self.task_graph.keys():\n info = self.task_graph.get_info(key)\n out.setdefault(info.status, []).append(key)\n return out",
"async def list_tasks():",
"def get_archieve(self):\n all_tasks = self.task_controller.get_list()\n return [task for task in all_tasks if task.is_completed == Status.DONE]",
"def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()",
"def _get_server_ids(self, unit_name):\n missing_unit_err = (\"Failed to find Server IDs. Unit {} is already\"\n \" missing from the {} cluster status\")\n sb_status, nb_status = self._cluster_status_action()\n\n for unit, server_id in sb_status[\"unit_map\"].items():\n if unit_name == unit:\n sb_id = server_id\n break\n else:\n self.fail(missing_unit_err.format(unit_name, \"Southbound\"))\n\n for unit, server_id in nb_status[\"unit_map\"].items():\n if unit_name == unit:\n nb_id = server_id\n break\n else:\n self.fail(missing_unit_err.format(unit_name, \"Northbound\"))\n\n return sb_id, nb_id",
"def taskdetail_get_ids_names():\n return IMPL.taskdetail_get_ids_names()",
"def tasks(self) -> List[TaskStatusDefinition]:\n return self._tasks",
"def show_tasks(self):\n task_ids = [\n t and t['id'] for t in self.controller.selected_tasks\n ]\n\n if self._check_cluster():\n self.print_list(\n ('id', 'status'), self.controller.get_tasks(),\n lambda x: task_ids.index(x['id'])\n )",
"def getTaskIdsFromName(tasks_name):\n ids = []\n for name in tasks_name:\n task_obj = Tafv2Task.objects.get(script=name)\n ids.append(task_obj.id)\n\n return ids",
"def get_services_by_status(self, status):\n\n services_project_ids = \\\n self.storage_controller.get_services_by_status(status)\n\n return services_project_ids",
"def running_celery_tasks(request):\n active_dict = CELERY_INSPECT.active()\n active_tasks = []\n if active_dict:\n for task_list in active_dict.values():\n active_tasks.extend(task_list)\n if active_tasks:\n active_tasks = [dikt.get(\"id\", \"\") for dikt in active_tasks]\n return Response({\"active_tasks\": active_tasks})",
"def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()",
"def get(self):\n\n return task_service.get_tasks()",
"def getTasks(server, appId, maxNumberTasks, completedOnly, oper = 0, fileName = 'data/jsonTasksInfo.dat'):\n if oper == 0:\n if completedOnly == 1:\n JSONdata = urllib2.urlopen(url=server+\"/api/task?app_id=\"+ \\\n str(appId)+\"&state=completed&limit=\"+ \\\n str(maxNumberTasks)).read()\n else:\n JSONdata = urllib2.urlopen(url=server+\"/api/task?app_id=\"+ \\\n str(appId)+\"&limit=\"+str(maxNumberTasks)).read()\n data = json.loads(JSONdata)\n with open(fileName,'w') as outfile:\n json.dump(data, outfile)\n outfile.close()\n elif oper == 1:\n with open(fileName,'r') as outfile:\n data = json.load(outfile)\n outfile.close()\n numberTasks = len(data)\n tasksInfo = []\n for item in range(numberTasks):\n tasksInfo.append({'taskId':data[item]['id'], \\\n 'area':data[item]['info']['tile']['restrictedExtent']})\n print 'number of total completed tasks: ', len(tasksInfo)\n return tasksInfo",
"def get_tasks_list(self):\n return self.task_controller.get_list()",
"def job_ids(config):\n errcode, output = queue(config)\n parse_line = False\n current_sched = None\n ids = []\n if errcode != 0:\n logger.debug('queue command issued return code: %s', errcode)\n return ids\n\n for line in output.splitlines():\n line = line.strip()\n parse_line = parse_line and bool(line)\n if parse_line:\n assert current_sched\n ids.append( (current_sched, line.split()[0]) )\n continue\n\n if line.startswith('--'):\n current_sched = line.split()[2].strip()\n\n if line.startswith('ID'):\n parse_line = True\n\n logger.debug('found the following jobs in Condor queue: %s', ids)\n return ids",
"def get_subtasks(self, tid):\n return self.task_controller.get_subtasks(tid)",
"def get_tasks(self):\n return self.stn.get_tasks()",
"def get_child_ids(self, job_specifier, project=None, status=None):\n if project is None:\n project = self._project\n id_master = self.get_job_id(project=project, job_specifier=job_specifier)\n if id_master is None:\n return []\n else:\n if status is not None:\n id_lst = self._job_table[\n (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values\n else:\n id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values\n return sorted(id_lst)",
"def get_image_ids(task):\n run_ids = [\n i for i, code in enumerate(BOLD_NAMES, 1) if task.upper() in code\n ]\n if not run_ids:\n raise ValueError(f\"Found no data for '{task}''\")\n return run_ids",
"def get_tasks(self):\n return self.tasks",
"def get_created_tasks(self):\n tasks = []\n for task_config in get_task_configs(self.context):\n task = task_config.get_created_task(self.context)\n if not task:\n continue\n matched, not_matched = task.start_conditions_status()\n if not not_matched:\n continue\n tasks.append((task, not_matched))\n\n return tasks"
]
| [
"0.7409147",
"0.6804526",
"0.67901206",
"0.67296845",
"0.671727",
"0.6612482",
"0.6425006",
"0.6319168",
"0.6282953",
"0.6219129",
"0.6155044",
"0.60994595",
"0.60912615",
"0.6084754",
"0.6075681",
"0.60585046",
"0.6018567",
"0.5985774",
"0.5976151",
"0.59108424",
"0.59107476",
"0.5910728",
"0.59047",
"0.5899568",
"0.5890914",
"0.5878222",
"0.5845215",
"0.582932",
"0.5818097",
"0.57977194"
]
| 0.7915353 | 0 |
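The two task-listing records above both walk a `<get_tasks/>` response with ElementTree. A self-contained sketch of that pattern against an invented response document (the XML literal, task names, and IDs are made up for illustration):

```python
from xml.etree import ElementTree as ET

# Invented <get_tasks/> response, shaped the way the records above expect.
SAMPLE = """
<get_tasks_response status="200">
  <task id="t-1"><name>weekly scan</name><status>Done</status></task>
  <task id="t-2"><name>dmz scan</name><status>Running</status></task>
</get_tasks_response>
"""

def task_ids_by_status(xml_text, status="Done"):
    # Map task name -> task id, keeping only tasks in the requested status.
    root = ET.fromstring(xml_text)
    return {t.find("name").text: t.get("id")
            for t in root.findall("task")
            if t.find("status").text == status}

print(task_ids_by_status(SAMPLE))             # {'weekly scan': 't-1'}
print(task_ids_by_status(SAMPLE, "Running"))  # {'dmz scan': 't-2'}
```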
Send OMP data to the manager and read the result. `data` may be either a unicode string, a UTF-8 encoded string, or an etree Element. The result is returned as an etree Element. | def _send(self, data):
# Make sure the data is a string.
if etree.iselement(data):
data = etree.dump(data)
if isinstance(data, unicode):
data = data.encode('utf-8')
# Synchronize access to the socket.
with self.__socket_lock:
# Send the data to the server.
self.socket.sendall(data)
# Get the response from the server.
data = ""
tree = None
while True:
chunk = self.socket.recv(1024)
if not chunk:
break
data += chunk
try:
tree = etree.fromstring(data)
except Exception:
continue
break
if tree is None:
tree = etree.fromstring(data)
# Return the parsed response.
return tree | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, data):\n\t\t# no processing here\n\t\treturn data",
"def process_data(self, data):\n return data",
"def process(self, data):\n if self.__head:\n self.__head.send(Element(\n stream_id=self.id,\n data=data))",
"def receive_data(self, data):\n if not self._has_list:\n self._process_main_list(data)\n return\n source = QXmlInputSource()\n source.setData(data)\n\n try:\n self._reader.parse(source)\n except Exception:\n pass",
"def run(self, data):\n\n if data and self.application:\n # Build tuples for embedding index\n if self.application.embeddings:\n data = [(x, element, None) for x, element in enumerate(data)]\n\n # Process workflow\n with st.spinner(\"Running workflow....\"):\n results = []\n for result in self.application.workflow(self.name, data):\n # Store result\n results.append(result)\n\n # Write result if this isn't an indexing workflow\n if not self.application.embeddings:\n st.write(result)\n\n # Store workflow results\n self.data = results",
"def mxt_send(self, data):\n return self._mxt_sock.send(data)",
"def reply_data( self, data, datatype=\"application/json\" ):\n self.send_response( 200 )\n self.send_header( \"Content-Type\", datatype )\n self.send_header( \"Content-Length\", len(data) )\n self.end_headers()\n \n self.wfile.write( data )\n return",
"def on_data(self, data):\n if data is not None:\n # Send the data to the parent process\n logging.debug('Received raw data : ' + str(data))\n self.mp_queue.put(data)",
"def data(self, data):\n try:\n self.put_nowait(data)\n except Full:\n pass",
"def process_data(data):\n bio = BytesIO()\n bio.write(data)\n bio.seek(0)\n process(bio)",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage",
"def _send_data(self, data, time):\n \n # Prepare data string with the values in data buffer\n data_string = ''\n # Timestamp\n data_string += '&time=' + str(time)\n # Node ID\n data_string += '&node=' + str(data[0])\n # Data\n data_string += '&json={'\n for i, val in enumerate(data[1:]):\n data_string += str(i+1) + ':' + str(val)\n data_string += ','\n # Remove trailing comma and close braces\n data_string = data_string[0:-1]+'}'\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/post.json?apikey=12345\n # &node=10&json={1:1806, 2:1664}'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + '/input/post.json?apikey=' + \\\n self._settings['apikey'] + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n return True\n else:\n self._log.warning(\"Send failure\")",
"def data(self, msg):\n self.putcmd(\"data\")\n (code,repl)=self.getreply()\n if self.debuglevel >0 : print>>sys.stderr, \"data:\", (code,repl)\n if code != 354:\n raise SMTPDataError(code,repl)\n else:\n q = quotedata(msg)\n if q[-2:] != CRLF:\n q = q + CRLF\n q = q + \".\" + CRLF\n\n # begin modified send code\n chunk_size = 10240\n bytes_sent = 0\n\n while bytes_sent != len(q):\n chunk = q[bytes_sent:bytes_sent+chunk_size]\n self.send(chunk)\n bytes_sent += len(chunk)\n if hasattr(self, \"callback\"):\n self.callback(bytes_sent, len(q))\n # end modified send code\n\n (code, msg) = self.getreply()\n if self.debuglevel > 0 : print>>sys.stderr, \"data:\", (code,msg)\n return (code,msg)",
"async def data_received(self, data: bytes) -> None:\n\n self.response_message.set_result(data)",
"def handle_data(self, data):\n \n line_num, offset = self.getpos()\n new_pos = self.new_line_pos[line_num] + offset\n self.data_buffer += self.html_doc[self.current_pos:new_pos]\n\n content = data\n if self.filtering:\n content = self.typogrify._apply_filters(content, self.lasttag)\n self.filtered_data_length = len(content)\n\n self.data_buffer += content\n self.current_pos = new_pos + len(data)",
"def _send_data(self):\n pass",
"def parse(self, data):\r\n\r\n parser.Parser.parse(self, data)\r\n\r\n # in case the current state of the parser is finished, must\r\n # reset the state to the start position as the parser is\r\n # re-starting (probably a new data sequence)\r\n if self.state == FINISH_STATE: self.clear()\r\n\r\n # retrieves the size of the data that has been sent for parsing\r\n # and saves it under the size original variable\r\n size = len(data)\r\n size_o = size\r\n\r\n # iterates continuously to try to process all that\r\n # data that has been sent for processing\r\n while size > 0:\r\n\r\n if self.state <= self.state_l:\r\n method = self.states[self.state - 1]\r\n count = method(data)\r\n if count == -1: break\r\n if count == 0: continue\r\n\r\n size -= count\r\n data = data[count:]\r\n\r\n continue\r\n\r\n elif self.state == FINISH_STATE:\r\n self.clear()\r\n\r\n continue\r\n\r\n else:\r\n raise netius.ParserError(\"Invalid state '%d'\" % self.state)\r\n\r\n # in case not all of the data has been processed\r\n # must add it to the buffer so that it may be used\r\n # latter in the next parsing of the message\r\n if size > 0: self.buffer.append(data)\r\n\r\n # returns the number of read (processed) bytes of the\r\n # data that has been sent to the parser\r\n return size_o - size",
"def data_received(self, data):\n # self.debug(\"received data=%r\", binascii.hexlify(data))\n self.dispatcher.add_data(data)",
"def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))",
"def sendData (self, data) :\n\n assert len(data) <= 255\n \n return self.sendCommand(\"CMD_IN_DATA_STDIN\", data).addCallback(self._sendData_result)",
"def process(self, data):\n return self.transformer.transform(data)",
"def unread_data(self, data: bytes) -> None:\n ...",
"def send_data(self, data: dict):\n pass",
"def handle_data(self, data):\n if self.article_body:\n if not self.suspend_acquisition:\n self.article_data += data",
"def send_data(data):\n\n if global_serializer is None:\n raise SystemExit('global serializer was not set')\n\n if not isinstance(data, dict):\n raise SerializationError('message is not a dictionary, got %r' % data)\n if not data:\n raise SerializationError('empty message dictionary')\n\n try:\n serialized = global_serializer(data)\n except Exception as ex:\n raise SerializationError(ex)\n real_print(serialized or global_serializer({}))"
]
| [
"0.56227386",
"0.5599538",
"0.5506999",
"0.5491832",
"0.5463451",
"0.5454051",
"0.54514366",
"0.54094595",
"0.5358407",
"0.53560555",
"0.52742416",
"0.52742416",
"0.52742416",
"0.52742416",
"0.52742416",
"0.52742416",
"0.5257809",
"0.5192717",
"0.518057",
"0.5177661",
"0.515765",
"0.51552266",
"0.5154576",
"0.5152458",
"0.5145331",
"0.50722814",
"0.5049184",
"0.50297934",
"0.5001911",
"0.49764076"
]
| 0.62201273 | 0 |
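The `_send` record above accumulates socket chunks until the buffer parses as XML. A small standalone sketch of that receive loop, exercised against a fake socket; the real method also handles the send side and lock acquisition, which are omitted here.

```python
from xml.etree import ElementTree as etree

def read_response(sock):
    # Keep reading until the accumulated bytes form a complete XML document.
    data = b""
    while True:
        chunk = sock.recv(1024)
        if not chunk:
            break
        data += chunk
        try:
            return etree.fromstring(data)
        except etree.ParseError:
            continue
    return etree.fromstring(data)

class FakeSocket:
    """Delivers a reply split across two recv() calls, then EOF."""
    def __init__(self, payload, cut):
        self._parts = [payload[:cut], payload[cut:], b""]
    def recv(self, _size):
        return self._parts.pop(0)

tree = read_response(FakeSocket(b"<get_version_response status='200'/>", 10))
print(tree.get("status"))  # 200
```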
Make a request and get the text of the response in raw format. | def _text_command(self, request):
response = self._send(request)
self._check_response(response)
return response.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_request_txt(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`txt`\n if self.is_txt(resp):\n return resp.content.decode(\"utf-8\")\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None",
"def get(self, request):\r\n data = {\r\n 'results': 'THIS IS THE PROTECTED STRING FROM SERVER',\r\n }\r\n return Response(data, status=status.HTTP_200_OK)",
"async def text(self, encoding=\"utf-8\", errors=\"strict\"):\n return self.response.decode(encoding, errors=errors)",
"def get_text(self):\n return self.res.text",
"def get(self, *args, **kwargs):\n as_text = kwargs.pop('as_text', True)\n kwargs['follow_redirects'] = kwargs.get('follow_redirects', True)\n response = self.app.get(*args, **kwargs)\n if as_text:\n return response.get_data(as_text=True)\n return response",
"def text_body(response: tornado.httpclient.HTTPResponse) -> str:\n return response.body.decode(encoding(response))",
"def get_raw_data(url):\n\n req = requests.get(url, stream=True)\n req.raw.decode_content = True\n return req.raw",
"def printable_reponse(self):\n resp = self.response\n msg = \"-- Reponse : {} -- \\r\\n\".format(resp.status_code)\n msg += \"Headers: {} \\r\\n\".format(str(resp.headers))\n msg += \"Body: {} \\r\\n\\r\\n\".format(str(resp.content))\n return msg",
"def raw_response(self):\r\n return self._full_response",
"def process_response(raw_response):\n\n # Note Currently SNE supports text or application/json response\n # get the content - type\n content_type = raw_response.headers.get('content-type')\n result = ''\n if 'text' in content_type:\n result = raw_response.text\n elif 'application/json' in content_type:\n result = raw_response.json()\n else:\n result = raw_response.content\n\n return result",
"def get_response(self):\n result = self.get_response_impl()\n if self.log_dest is not None:\n is_error, response = result\n if is_error:\n response = \"? \" + response\n else:\n response = \"= \" + response\n self._log(\"<< \", response.rstrip())\n return result",
"def get_response(self, request):\n data = self.get_data(request)\n outrepr = self.get_outrepr(request)\n return outrepr(data)",
"def _get_raw_html(self):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(c.URL, self.url)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n c.close()\n return buffer.getvalue()",
"def read_text(self, url: str) -> str:\n response = self._session().get(url)\n if not response.ok:\n response.raise_for_status()\n return response.text",
"def echo(self):\r\n request = http.Request('GET', self.get_url() + '/echo')\r\n\r\n return request, parsers.parse_json",
"def get(self):\n\n # Return a plain text response\n return self.plain_text_response(\"Alive!\", 200)",
"def get_text(self, url, *, timeout, headers):",
"def text( self, data ):\n self.res.content_type = 'text/plain'\n self.res.status = 202\n self.res.body = data\n return self.res(self.environ, self.start_response)",
"def do_GET(self):\n response = \"\"\n response_code = 200\n try:\n self.server.log(self.client_address, str(self.path))\n\n query = urlparse(self.path).query\n\n query_components = dict(qc.split(\"=\") for qc in query.split(\"&\"))\n user_text = urllib.unquote(query_components.get('ut', ''))\n request_data = urllib.unquote(query_components.get('rd', ''))\n\n # connection test\n if not user_text or not request_data:\n user_text = ''\n response = 'online'\n\n # fluency assessment\n else:\n l1 = langdetect.detect(user_text)\n l2, _ = langid.classify(user_text)\n good, tot = self.server.spellcheck(request_data.split(','), user_text)\n ban_phr = self.server.check_banned_phrases(user_text)\n # one of the detectors must say it's English, allow 1 typo per 10 words\n response = ('yes'\n if ((l1 == 'en' or l2 == 'en') and (tot - good <= tot / 10 + 1) and not ban_phr)\n else ('no:' + l1 + ' ' + l2 +\n (' spellcheck score: %d / %d' % (good, tot)) +\n ' ban_phr: ' + ban_phr))\n\n except Exception as e:\n print >> sys.stderr, unicode(e).encode('utf-8')\n import traceback\n traceback.print_exc()\n user_text = ''\n response = 'no'\n\n self.send_response(response_code)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\") # CORS\n self.end_headers()\n\n ret = {'result': response, 'text': user_text}\n self.wfile.write(json.dumps(ret))",
"def request_message_txt(self):\r\n headers, body = self.create_request()\r\n\r\n header_txt = \"\\n\".join(\r\n \"{}: {}\".format(h, v) for h, v in sorted(headers.items())\r\n )\r\n body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')\r\n\r\n return header_txt + \"\\n\\n\" + body_txt",
"def send_request(request):\n auth()\n response = urllib2.urlopen(request)\n\n return BeautifulSoup(response).resultmessage.string",
"def get(self, url):\n \n content = \"\"\n if hasattr(http.client, \"HTTPSConnection\"): \n url_options = urlparse(url)\n\n conn = http.client.HTTPSConnection(url_options.netloc)\n conn.request('GET', url_options.path + '?' + url_options.query)\n content = conn.getresponse().read().decode('utf-8')\n conn.close()\n else: \n p = os.popen('curl -k \"' + url + '\"')\n content = p.read()\n p.close() \n\n return content",
"def return_response_string(self):\n response = \"{} {}\\r\\n\".format(self.protocol, self.code)\n str_headers = \"\"\n if self.headers:\n for k, v in self.headers.items():\n str_headers += \"{}: {}\\r\\n\".format(k, v)\n\n encoded_response = \"{}{}\\r\\n\".format(response, str_headers)\n encoded_response = encoded_response.encode(\"utf-8\")\n if self.body:\n if type(self.body) is not bytes:\n self.body = self.body.encode(\"utf-8\")\n encoded_response = encoded_response + self.body\n return encoded_response",
"def send_request(self, request):\n json_results = requests.get(request).json()\n\n status = json_results['status']\n\n if status == const.STATUS_OK:\n return json_results['results']\n\n self.log.warning(self.get_status_code(status))",
"def request_and_show(self, verb, path, body=None):\n path = '%s%s' % (self.prefix, path.lstrip('/'))\n try:\n if body is None: self.conn.request(verb, path)\n else: self.conn.request(verb, path, body)\n except socket.error, e:\n print 'Cannot request %r %r: %s' % (verb, path, e)\n sys.exit(1)\n rl = self.conn.getresponse()\n if self.verbose or rl.status//100 != 2:\n print '%s %s gave: %s %r' % (verb, path, rl.status, rl.reason)\n if rl.status//100 == 2:\n if self.verbose:\n print 'HEADERS:'\n for h, v in rl.getheaders(): print ' ', h, v\n print 'CONTENTS:'\n body = rl.read()\n if self.verbose:\n for line in body.splitlines():\n print ' ', line\n print\n return simplejson.loads(body)\n else:\n return None",
"def fetch(self, url):\n self.log.info(\"Fetching URL: \" + url)\n\n r = requests.get(url, verify=False)\n # raise an HTTPError on badness\n r.raise_for_status()\n\n # this decodes r.content using a guessed encoding\n return r.text",
"def query(self, cmd, raw=False):\n url = self.__baseurl.format(cmd)\n req = self.session.get(url)\n if not req.ok:\n req.raise_for_status()\n\n return req.text if raw else to_dict(req.text)",
"async def http_get(self, url, ignore_errors=False):\n self.logger.debug(\"HTTP GET %s\", url)\n code, header, body = await fetch(\n url,\n request_timeout=config.activator.http_request_timeout,\n follow_redirects=True,\n validate_cert=config.activator.http_validate_cert,\n eof_mark=b\"</html>\",\n )\n if 200 <= code <= 299:\n return smart_text(body, errors=\"replace\")\n elif ignore_errors:\n metrics[\"error\", (\"type\", f\"http_error_{code}\")] += 1\n self.logger.debug(\"HTTP GET %s failed: %s %s\", url, code, body)\n return smart_text(header, errors=\"replace\") + smart_text(body, errors=\"replace\")\n else:\n metrics[\"error\", (\"type\", f\"http_error_{code}\")] += 1\n self.logger.debug(\"HTTP GET %s failed: %s %s\", url, code, body)\n return None",
"def get_response(request_url):\n response = requests.get(request_url)\n return json.loads(response.text)",
"def get_response(request_url):\n return requests.get(request_url)"
]
| [
"0.76783127",
"0.7064034",
"0.70067334",
"0.65991724",
"0.6491401",
"0.64328295",
"0.635193",
"0.6351664",
"0.63195264",
"0.63114756",
"0.6302482",
"0.6273952",
"0.6234914",
"0.6232645",
"0.62322557",
"0.6210151",
"0.61522424",
"0.61335206",
"0.6114845",
"0.6100542",
"0.60428107",
"0.60405904",
"0.60307604",
"0.6029749",
"0.6028149",
"0.6000216",
"0.59928536",
"0.59905666",
"0.5987539",
"0.5965705"
]
| 0.7290089 | 1 |
Make a request and get the response as xml tree format. | def _xml_command(self, request):
response = self._send(request)
self._check_response(response)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_request_xml(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`xml`\n if self.is_good_enough_xml(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None",
"def execute(self):\n headers = {\n 'Content-type': 'application/x-www-form-urlencoded',\n 'Accept-Charset': 'utf-8',\n 'User-Agent': USER_AGENT\n }\n request = urllib2.Request(self.url(), headers=headers)\n response = urllib2.urlopen(request)\n \n return etree.parse(response)",
"def render_GET(self, request):\n return etree.tostring(self.xml(request), pretty_print=True)",
"def _api_call(url: str) -> ET.Element:\n result = requests.get(url)\n if result.status_code != 200:\n raise RequestException(f\"API status code {result.status_code} for URL: {url}\")\n\n # Remove HTML line breaks (which cause confusion in the XML parsing)\n t: str = re.sub(r\"\\s*(<br/>)+\\s*\", r\" \", result.text)\n\n x_tree = ET.fromstring(t)\n return x_tree",
"def output_xml(data,code,headers=None):\r\n resp = make_response(dumps({'response': data}), code)\r\n resp.headers.extend(headers or {})\r\n return resp",
"def output_xml(data, code, headers=None):\n resp = make_response(simplexml_dumps({\"response\": data}), code)\n resp.headers.extend(headers or {})\n return resp",
"def xml():\n response = make_response(render_template(\"sample.xml\"))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response",
"def content_api_xml(url, request):\n headers = {'content-type': 'application/xml'}\n content = 'xml string'\n return response(status_code=200,\n content=content,\n headers=headers,\n request=request)",
"def output_xml(data, code, headers=None):\n response = make_response(simplexml.dumps({'response': data}), code)\n response.headers.extend(headers or {})\n return response",
"def output_xml(data, code, headers=None):\r\n resp = make_response(dumps({'response' :data}), code)\r\n resp.headers.extend(headers or {})\r\n return resp",
"def getxml(url, **kwargs):\n xml = fetch_resource(url, **kwargs)\n return etree.fromstring(xml)",
"def request_xml(self):\n xml_filename = pkg_resources.resource_filename(__name__, 'data/request.xml')\n with open(xml_filename, 'r') as xml_file:\n xml = xml_file.read()\n xml = xml.format(username=self.username,\n password=self.password,\n timestamp=time.time(),\n hardware_id=self.hardware_id(),\n advertisement_id=self.advertisement_id(),\n locale=self.locale)\n return xml",
"def do_request(xml_location):\n request = open(xml_location,\"r\").read()\n webservice = httplib.HTTP(HOST,PORT)\n webservice.putrequest(\"POST\", API_URL)\n webservice.putheader(\"Host\", HOST)\n webservice.putheader(\"User-Agent\",\"Python post\")\n webservice.putheader(\"Content-type\", \"text/xml; charset=\\\"UTF-8\\\"\")\n webservice.putheader(\"Content-length\", \"%d\" % len(request))\n webservice.endheaders()\n webservice.send(request)\n statuscode, statusmessage, header = webservice.getreply()\n result = webservice.getfile().read()\n print statuscode, statusmessage, header\n print result",
"def make_xml_request(self, xmldata, xml_result=False):\n\n if xml_result:\n return self._xml_command(xmldata)\n else:\n return self._text_command(xmldata)",
"def test():\n r = Response(response=\"This worked!\", status=200,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r",
"def __call_api(self, values):\n # Add auth key to the request dictionary if not supplie\n if 'auth' not in values:\n values['auth'] = self.auth_data['auth']\n\n # Encode the data for a GET request\n data = urllib.parse.urlencode(values)\n\n #print values\n\n # Try to make the request\n xml_string = urllib.request.urlopen(self.xml_rpc + '?' + data).read()\n\n # Parse the XML\n response_data = xmltodict(self.__sanitize(xml_string))\n\n # Ensure that there was XML to parse\n if not response_data:\n return None\n\n # Grab the root element\n response_data = response_data['root'][0]['child']\n\n return response_data",
"def xml(self):\n raise NotImplementedError('This api does not return xml')",
"def request(self, host, handler, request_body, verbose=0):\n self.verbose = verbose\n\n headers = {'Content-type': 'text/xml'}\n data = request_body\n req = urllib2.Request('http://' + host + handler, data, headers)\n\n response = self.opener.open(req)\n\n return self.parse_response(response)",
"def get_list(self, request, **kwargs):\n # :TODO modify top_level_serializer or pass a list with self as\n # argument?\n registry = {getattr(self._meta , 'resource_name'): self}\n content = serializers.top_level_serializer(registry)\n response = HttpResponse(\n content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response",
"def get_list(self, request, **kwargs):\n # :TODO modify top_level_serializer or pass a list with self as\n # argument?\n registry = {getattr(self._meta , 'resource_name'): self}\n content = serializers.top_level_serializer(registry)\n response = HttpResponse(\n content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response",
"def xml(self, request):\n raise Exception(\"Not Implemented\")",
"def retrieval():\n try:\n if request.method == 'GET':\n country = request.args.get('country') # If no key then null\n year = request.args.get('year') # If no key then null\n return spout(country, year)\n except Exception as e:\n # Unfortunately I'm not going to wrap this in indv. strings\n r = Response(response=error_msg+str(e),\n status=404,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r",
"def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()",
"def xml():\n try:\n return Response(render_template(\n 'lti.xml.j2'), mimetype='application/xml'\n )\n except:\n app.logger.error(\"Error with XML.\")\n return return_error('''Error with XML. Please refresh and try again. If this error persists,\n please contact support.''')",
"def _send_xml(self, url, xml):\n http = httplib2.Http()\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": \"Basic %s\" % self.get_basic_auth()}\n return http.request(url, \"POST\", xml, headers=headers)",
"def request(self, host, handler, request_body, verbose):\n headers = {'User-Agent': self.user_agent,\n 'Content-Type': 'text/xml',\n }\n url = self._build_url(host, handler)\n kwargs = {}\n if StrictVersion(requests.__version__) >= StrictVersion('0.8.8'):\n kwargs['verify'] = True\n else:\n if self.use_https:\n warnings.warn(\n 'using https transport but no certificate '\n 'verification. (Hint: upgrade requests package.)')\n try:\n resp = requests.post(url, data=request_body, headers=headers,\n **kwargs)\n except ValueError:\n raise\n except Exception:\n raise # something went wrong\n else:\n try:\n resp.raise_for_status()\n except requests.RequestException as e:\n raise xmlrpc.ProtocolError(\n url, resp.status_code, str(e), resp.headers)\n else:\n return self.parse_response(resp)",
"def xmlrpc_response(data):\n xml = xmlrpc_marshal(data)\n response = webob.Response(xml)\n response.content_type = 'text/xml'\n response.content_length = len(xml)\n return response",
"def device_xml() -> Response:\n xml = render_template('device.xml',\n device_model=config.device_model,\n device_version=config.device_version,\n friendly_name=locast_service.city,\n uid=uid,\n host_and_port=host_and_port)\n return Response(xml, mimetype='text/xml')",
"def getXmlTree(url):\n return lxml.etree.parse(url)",
"def _parse_xml(self, response):\n if response.startswith('\\n'):\n response = response[1:]\n tree = etree.fromstring(response)\n return tree"
]
| [
"0.72486365",
"0.65347195",
"0.6500496",
"0.6489617",
"0.64853764",
"0.64333767",
"0.64266306",
"0.6416313",
"0.6391655",
"0.63781",
"0.6280537",
"0.62649214",
"0.6152969",
"0.6142053",
"0.6107071",
"0.59933704",
"0.5915285",
"0.59001976",
"0.5878529",
"0.5878529",
"0.5843144",
"0.580357",
"0.5794353",
"0.5771155",
"0.5769727",
"0.56864065",
"0.56843656",
"0.5661927",
"0.5643771",
"0.56300867"
]
| 0.6877252 | 1 |
Return all the measurements from the given candidate | def all_measurements(candidate, godata):
measurements = OrderedDict()
measurements.update(concept_measurements(candidate, godata))
measurements.update(evidence_measurements(candidate))
measurements.update(bias_measurements(candidate))
return measurements | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAllMeasurement(self): \n return self.measurement",
"def measurements(self):\n # TODO: add in empty measurements for assays that have none?\n return self._measure_queryset",
"def get_all_DLP_measurements(self):\n pass",
"def measurements(self) -> List[Measurement]:\n return self._measurements",
"def measurements(self):\n return self._measurements",
"def get_measurements(self):\n all_measurements = []\n for m_name, m_obj in flavio.classes.Measurement.instances.items():\n if m_name.split(' ')[0] == 'Pseudo-measurement':\n # skip pseudo measurements generated by FastFit instances\n continue\n if set(m_obj.all_parameters).isdisjoint(self.observables):\n # if set of all observables constrained by measurement is disjoint\n # with fit observables, do nothing\n continue\n else:\n # else, add measurement name to output list\n all_measurements.append(m_name)\n if self.exclude_measurements is None and self.include_measurements is None:\n return all_measurements\n elif self.exclude_measurements is not None:\n return list(set(all_measurements) - set(self.exclude_measurements))\n elif self.include_measurements is not None:\n return list(set(all_measurements) & set(self.include_measurements))",
"def getMeasures():",
"def measurements(self):\n # get available measurement types for this node\n measurement_types = self.measurement_types()\n\n # retrieve measurement for each type\n return list(self.measurement(t) for t in measurement_types)",
"def get_all_measurements(self, start_time, end_time):\n return",
"def getMeasurements(self):\n return self._Measurements",
"def getMeasures(unique_name=None):",
"def get_meas_list(self, **kwargs):\n channel = kwargs.get(\"channel\", self.active_channel)\n meas_list = self.scpi.query_meas_name_list(channel)\n if len(meas_list) == 1:\n return None # if there isnt a single comma, then there arent any measurments\n return [(meas_list[k], meas_list[k + 1]) for k in range(0, len(meas_list) - 1, 2)]",
"def measurements(self) -> NONEARRAY:\n\n return self._measurements",
"def measurements(self) -> NONEARRAY:\n pass",
"def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf",
"def get_measurements(address, temperature_correction):\n\n # Removes address since it is irrelevant\n del address\n\n # Initializes dictionnary of measurements collected\n all_values = {\n 'temperature': None,\n 'humidity': None,\n 'pressure': None\n }\n\n if SenseHat is not None:\n\n try:\n # Gets all measurements from Sensehat, and applies correction\n sense = SenseHat()\n all_values['temperature'] = sense.get_temperature() + temperature_correction\n all_values['humidity'] = sense.get_humidity()\n all_values['pressure'] = sense.get_pressure()\n\n except Exception as e:\n\n # Something went wrong when retrieving the values. Log error.\n general_utils.log_error(-409, 'Sensehat', str(e))\n\n ##################\n return all_values\n ##################",
"def get_all_candidates(self) -> list:",
"def get_results():\n _, body = API.measurements(city='Los Angeles', parameter='pm25', limit=100)\n result = []\n for dict in body['results']:\n date = dict['date']['utc']\n value = dict['value']\n result.append((date, value))\n return result",
"def get_measurements(self, pipeline, object_name, category):\n result = self.get_object_measurements(pipeline, object_name, category,\n {self.object_name.value: [] })\n return result",
"def measurements(self):\n return self.config['measurements']",
"def get(self):\n return self._measurementController.getMeasurements(), 200",
"def measurements_lookup(client, database):\n client.switch_database(database)\n mlist_dict = client.get_list_measurements()\n # print(\"def measurements_lookup 010:\", mlist_dict[:10])\n return mlist_dict",
"def load_meters(self, account_pk: int) -> List[Meter]:\n query = \"\"\"\n select *\n from Meter\n where AccountFK=%s and ServiceType in ('water', 'sewer', 'irrigation', 'sanitation')\n \"\"\"\n result_set = self.fetch_all(query, account_pk)\n return [UrjanetPyMySqlDataSource.parse_meter_row(row) for row in result_set]",
"def get_meters(api):\n devices = api.device_list()\n return [Meter(api, device.hardware_address) for device in devices if device.model_id == 'electric_meter']",
"def get_all_measurements(self):\n self.__database_cursor.execute('SELECT id, measurement_time, sensor_used, magnetic_field_used, gas_used '\n f'FROM measurements ORDER BY measurement_time DESC')\n result = self.__database_cursor.fetchall()\n\n results_organized = []\n\n for row in result:\n results_organized.append({'id': row[0], 'measurement_time': row[1], 'sensor_used': row[2],\n 'magnetic_field': row[3], 'gas_used': row[4]})\n\n return results_organized",
"def _extract_results(self) -> None:\n metric_name = self.metric.name\n for inference_name in ['train', 'test', 'opt']:\n # TODO: Extract information from self.search_results\n data = getattr(self.search_results, f'{inference_name}_metric_dict')[metric_name]\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'single::{inference_name}::{metric_name}'] = np.array(data)\n\n if self.ensemble_results.empty() or inference_name == 'opt':\n continue\n\n data = getattr(self.ensemble_results, f'{inference_name}_scores')\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'ensemble::{inference_name}::{metric_name}'] = np.array(data)",
"def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += c.value\r\n fitness.append(total)\r\n else:\r\n for candidate in candidates:\r\n total_value = 0\r\n total_weight = 0\r\n for c, i in zip(candidate, self.items):\r\n total_weight += c * i[0]\r\n total_value += c * i[1]\r\n if total_weight > self.capacity:\r\n fitness.append(self.capacity - total_weight)\r\n else:\r\n fitness.append(total_value)\r\n return fitness",
"def get_measurements(serial):\n try: \n measurements = pd.read_csv('Data/kulkijat-15m.csv', sep=',')\n except FileNotFoundError:\n print('\\nHaivantoja sisältävää tiedostoa kulkijat-15m.csv ei löytynyt.\\n')\n measurements = pd.DataFrame()\n\n return measurements[['0', serial]]",
"def _config_measurements(self, spec, period):\r\n logging.info(\"Config measurement for spec {0}\".format(spec))\r\n \r\n eq = self._get_equipment()\r\n\r\n measurements=[[],[],[]]\r\n \r\n mplane_param2value={}\r\n for k in spec.parameter_names():\r\n v = spec.get_parameter_value(k)\r\n if isinstance(v,float):\r\n v = \"{:.0f}\".format(v)\r\n else:\r\n v = str(v)\r\n mplane_param2value[k] = v\r\n \r\n for meas_type in sorted(self._meas[\"types\"].keys()):\r\n (meas,add2)=self._add_or_update_measurement(eq,meas_type,mplane_param2value,period)\r\n measurements[add2].append(meas)\r\n \r\n return measurements",
"def getMeasure(unique_name):"
]
| [
"0.5974163",
"0.5968742",
"0.5916268",
"0.58943176",
"0.58498883",
"0.58254296",
"0.5810822",
"0.57148933",
"0.5701141",
"0.56946844",
"0.56884414",
"0.5596696",
"0.5587437",
"0.5565164",
"0.553554",
"0.5528445",
"0.55090314",
"0.5449595",
"0.54183304",
"0.540518",
"0.5405111",
"0.53540605",
"0.53201896",
"0.5312054",
"0.5307381",
"0.5225559",
"0.519977",
"0.5197234",
"0.51815355",
"0.5158378"
]
| 0.70730406 | 0 |
Seed the numpy prng and return a data frame w/ predictable test inputs so that the tests will have consistent results across builds. | def random_df(request):
old_state = np.random.get_state()
def fin():
# tear down: reset the prng after the test to the pre-test state
np.random.set_state(old_state)
request.addfinalizer(fin)
np.random.seed(1)
return pd.DataFrame(
{'some_count': np.random.randint(1, 8, 20)},
index=range(0, 20)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_data_for_tests():\n X = np.random.randn(100, input_dim)\n Y = np.random.randn(100, output_dim)\n X_new = np.random.randn(100, input_dim)\n return X, X_new, Y",
"def build_data(seed):\n rs = np.random.RandomState(seed)\n\n def y(x):\n \"\"\" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 \"\"\"\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4\n\n xtrain = rs.rand(10000, 4)\n xtest = rs.rand(1000, 4)\n ytrain = y(xtrain) + rs.rand(10000) / 10\n ytest = y(xtest) + rs.rand(1000) / 10\n return xtrain, xtest, ytrain, ytest",
"def random():\n np.random.seed(0)",
"def data_gen(size, p):\n #print(np.random.get_state()[1][0])\n random_table = np.random.binomial(size = size, p = p, n = 1)\n test_array = np.zeros((size, 2), dtype = int)\n for i in range(size):\n test_array[i,0] = i\n test_array[i,1] = random_table[i]\n return test_array",
"def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)",
"def set_random_seed():\n np.random.seed(42)",
"def set_seed():\n np.random.seed(1423)",
"def random():\n np.random.seed(1939)",
"def generate_test_set(data, pts): \n test_set = np.asarray(random.sample(data, pts))\n \n return test_set",
"def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test",
"def numpy_random_seed():\n numpy.random.seed(42)",
"def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)",
"def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)",
"def get_data(generator, random, bench_id):\n x_train, y_train, x_test, y_test = generator(random, bench_id)\n x_train = np.c_[np.ones(len(x_train)), x_train]\n x_test = np.c_[np.ones(len(x_test)), x_test]\n return x_train, y_train, x_test, y_test",
"def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data",
"def generate_rand():\n n_cols = 5\n n_rows = 100\n n_class = 10\n trial_x = np.random.rand(n_rows, n_cols)\n trial_y = np.random.random_integers(1, n_class, size = (n_rows, 1))\n\n # Append response to data\n trial_data = np.append(trial_x, trial_y, 1)\n return trial_data, n_class, n_cols",
"def import_data(seed: object = 42) -> object:\n\n # Read input data\n df = pd.read_csv(\"x_train_gr_smpl.csv\").astype(int)\n\n # label data-frame rows based on sample data\n for x in range(10):\n index = ~pd.read_csv(\"y_train_smpl_%s.csv\" % x, squeeze=True).astype(bool) # reversed flags (~)\n df.loc[index, 'label'] = str(x)\n\n input_data_ordered = df.iloc[:, 0:2304].to_numpy()\n output_data_ordered = df.iloc[:, 2304].to_numpy()\n\n # Randomise instance order (forcing the same result each time)\n np.random.seed(seed)\n permutation = np.random.permutation(df.shape[0])\n\n # Create base input and output arrays\n input_data = input_data_ordered[permutation]\n output_data = output_data_ordered[permutation]\n\n return input_data, output_data, df, input_data_ordered, output_data_ordered",
"def make_test_data(self):\r\n\r\n \r\n\r\n print (\"Creating Test Sample:\")\r\n\r\n print (' Period, rate, reps, phases: ', self.period, self.framerate, self.nrepetitions, self.nPhases)\r\n\r\n nframes = int(self.period * self.framerate * self.nrepetitions)\r\n\r\n print (' nframes: ', nframes)\r\n\r\n if self.bkgdNoise > 0.:\r\n\r\n d = np.random.normal(size=(nframes,self.imageSize[0],self.imageSize[1]),\r\n\r\n loc=self.bkgdIntensity, scale=self.bkgdNoise).astype('float32')\r\n\r\n else:\r\n\r\n d = self.bkgdIntensity*np.ones((nframes,self.imageSize[0],self.imageSize[1])).astype('float32')\r\n\r\n \r\n\r\n ds = d.shape\r\n\r\n print (' data shape: ', ds)\r\n\r\n dx = int(ds[2]/4)\r\n\r\n xc = int(ds[2]/2)\r\n\r\n xo = [xc-dx, xc+dx]\r\n\r\n ywidth = int(ds[2]/(self.nPhases+2))\r\n\r\n framedelay = 4\r\n\r\n\r\n\r\n if not self.mode:\r\n\r\n self.phasex = []\r\n\r\n self.phasey = []\r\n\r\n for i in range(0,self.nPhases):\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # each phase is assigned to a region\r\n\r\n self.resp = np.zeros((nframes,))\r\n\r\n self.resp = np.cos(\r\n\r\n np.linspace(0, 2.0*np.pi*nframes/(self.period*self.framerate), nframes-framedelay)+i*np.pi/8 - np.pi/2.0)\r\n\r\n self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n d[:, xo[0]:xo[1], dy:dy+ywidth ] += self.resp[:, np.newaxis, np.newaxis]\r\n\r\n self.phasey.append( (2+(dy+int(ds[2]/self.nPhases))/2))\r\n\r\n self.phasex.append((6+int(ds[1]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)\r\n\r\n else:\r\n\r\n self.nPhases = 4\r\n\r\n self.spotsize = 16\r\n\r\n nrpts = 20\r\n\r\n nsites = 4\r\n\r\n one_rep = int(self.period*self.framerate)\r\n\r\n isi = int(self.period*self.framerate/self.nPhases)\r\n\r\n print('period, isi: ', self.period, isi)\r\n\r\n r = np.arange(0, nrpts, 1.)\r\n\r\n alpha = 4.\r\n\r\n A = r/alpha *np.exp(-(r-alpha)/alpha) # scaled alpha function\r\n\r\n self.spot= self.gauss_spot(self.spotsize, 3.) 
# the 2d spot\r\n\r\n sigsize = np.random.normal(size=self.nPhases, loc=self.signal_size, scale=self.signal_size*2)\r\n\r\n sigsize = [np.abs(s) for s in sigsize] # restrict to positive amplitudes\r\n\r\n print ('sigsize: ', sigsize)\r\n\r\n for j in range(self.nrepetitions):\r\n\r\n for i in range(self.nPhases):\r\n\r\n self.resp = np.zeros((nrpts, self.spot.shape[0], self.spot.shape[1]))\r\n\r\n for k in range(nrpts):\r\n\r\n self.resp[k,:,:] += sigsize[i]*A[k] * self.spot # make response an alpha time course of gaussian spot\r\n\r\n start = j*one_rep + i*isi + framedelay\r\n\r\n stop = start + nrpts\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # location for phase\r\n\r\n #dy = dy + 2*z\r\n\r\n# print ('start, stop: ', start, stop)\r\n\r\n for z in range(nsites):\r\n\r\n #self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n xp = xo[0] + i*10 - 10*z\r\n\r\n yp = dy - i*10 + 10*z\r\n\r\n d[start:stop, xp:xp+self.spotsize, yp:yp+self.spotsize ] += self.resp\r\n\r\n self.imageData = d # reduce to a 16-bit map to match camera data type\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.times = np.arange(0, nframes/self.framerate, 1.0/self.framerate)\r\n\r\n print( \" Test Image Created\")\r\n\r\n # imv = pg.ImageView()\r\n\r\n # imv.show()\r\n\r\n # imv.setImage(self.imageData)\r\n\r\n\r\n\r\n if self.layout is not None:\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 0, 1)\r\n\r\n self.adjust_image_data()\r\n\r\n self.avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n print (' Test file, original Image Info: ')\r\n\r\n self.print_image_info()\r\n\r\n self.rebin_image()\r\n\r\n #self.clean_windowerrors()\r\n\r\n # pg.image(self.imageData)\r\n\r\n # pg.show()\r\n\r\n # mpl.figure(1)\r\n\r\n # mpl.show()\r\n\r\n if not self.mode: # FFT analysis\r\n\r\n self.analysis_fourier_map(target=1, mode=0)\r\n\r\n self.plot_maps(mode=2, gfilter=self.gfilter)\r\n\r\n else:\r\n\r\n self.analysis_dFF_map()\r\n\r\n mpl.show()",
"def load_test_data():\r\n X_test = np.load('data/test/X_test.npy')\r\n scaling_test = np.load('data/test/scaling_test.npy')\r\n ids_test = np.load('data/test/ids_test.npy')\r\n y_test = np.load('data/test/y_test.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_test)\r\n\r\n return X_test, scaling_test, ids_test, y_test",
"def _prepare_evaluate(self):\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n labels += ['num_draws_prob', 'seed_prob']\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version, num_draws_prob, seed_prob = \\\n dist_class_attributes(self.respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n if self.mpi_setup == MISSING_INT:\n slavecomm = self.mpi_setup\n else:\n slavecomm = self.mpi_setup.py2f()\n self.set_up_baseline(periods_draws_emax, None)\n\n initial_conditions = get_initial_conditions(self.respy_base)\n\n args = (smm_sample_f2py, state_space_info, initial_conditions, disturbances, slavecomm)\n self.simulate_sample = partial(*args)",
"def setUp(self):\n\n pass\n # screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)\n # setup the test as needed\n # e.g. pandas to open screenip qaqc csv\n # Read qaqc csv and create pandas DataFrames for inputs and expected outputs",
"def setUp(self):\n self.TestData = array([0,1,1,4,2,5,2,4,1,2])\n self.NoSingles = array([0,2,2,4,5,0,0,0,0,0])\n self.NoDoubles = array([0,1,1,4,5,0,0,0,0,0])",
"def init_predictions(nb_rows, nb_cols, dtype=np.float64):\n return np.zeros((nb_rows, nb_cols), dtype=dtype)",
"def init_benchmark_data(\n num_inputs, input_size, num_classes, rand_seed=None,\n **kwargs\n):\n N, D, C = num_inputs, input_size, num_classes\n\n rs = np.random.RandomState(seed=rand_seed)\n X = rs.rand(N, D)\n y = rs.choice(C, size=N)\n return X, y",
"def prepare_test_data(dataframe, start_date, window_days, test_size=1):\n data = dataframe.copy()\n date_test_start = pd.to_datetime(start_date) + dt.timedelta(days=window_days)\n date_test_end = date_test_start + dt.timedelta(hours=test_size)\n \n columns = data.columns.values\n outputs = [col_name for col_name in columns if 'Load+' in col_name]\n inputs = [col_name for col_name in columns if col_name not in outputs]\n \n if test_size == 1:\n # Array with a single value needs to be reshaped accordingly\n X_new = data[inputs].loc[date_test_start].values.reshape(1,-1)\n else:\n X_new = data[inputs].loc[date_test_start:date_test_end].values[:-1]\n\n return X_new",
"def reproducible(seed: int = 0) -> None:\n\n os.environ[\"PYTHONHASHSEED\"] = \"0\"\n\n np.random.seed(seed)\n python_random.seed(seed)\n tf.random.set_seed(seed)",
"def rng():\n return np.random.default_rng()",
"def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)",
"def prepare_simulation(master_seed, n_populations):\n nest.ResetKernel()\n # set global kernel parameters\n nest.SetKernelStatus(\n {\"communicate_allgather\": sim.allgather,\n \"overwrite_files\": sim.overwrite_existing_files,\n \"resolution\": sim.dt,\n \"total_num_virtual_procs\": sim.n_vp})\n if sim.to_text_file:\n nest.SetKernelStatus({\"data_path\": data_path_test})\n \n # Set random seeds\n \n # PYNEST\n #nest.sli_run('0 << /rngs [%i %i] Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map >> SetStatus'%(\n # master_seed, master_seed + sim.n_vp - 1))\n #nest.SetKernelStatus({\"rng_seeds\" : range(master_seed, master_seed + sim.n_vp)})\n #nest.sli_run('0 << /grng rngdict/gsl_mt19937 :: %i CreateRNG >> SetStatus'%(master_seed + sim.n_vp))\n #nest.SetKernelStatus({\"grng_seed\" : master_seed + sim.n_vp})\n #pyrngs = [np.random.RandomState(s) for s in \n # range(master_seed + sim.n_vp + 1, master_seed + 2 * sim.n_vp + 1)]\n\n # SLI VERSION\n sli_str = \"0 << \\n\"\n #sli_str += \"/rngs %i [0 %i 1 sub] add Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map\\n\"%(master_seed, sim.n_vp) # local RNG, seeded\n #sli_str += \"/grng rngdict/gsl_mt19937 :: %i %i add CreateRNG\\n\"%(master_seed, sim.n_vp) # global RNG\n sli_str += \"/rng_seeds %i [0 %i 1 sub] add Range\\n\"%(master_seed, sim.n_vp) # local RNG seeds\n sli_str += \"/grng_seed %i %i add\\n\"%(master_seed, sim.n_vp) # global RNG seed\n sli_str += \">> SetStatus\"\n nest.sli_run(sli_str)\n sli_str2 = \"/script_rngs [%i]\\n\"%sim.n_vp\n sli_str2 += \"{%i add rngdict /gsl_mt19937 get exch CreateRNG } Table def\\n\"%(master_seed + sim.n_vp)\n sli_str2 += \"/normal_rdvs script_rngs { rdevdict /normal get CreateRDV } Map def\"\n nest.sli_run(sli_str2)\n pyrngs = None\n return pyrngs",
"def seed(self, seed):\n\n random.seed(seed)\n np.random.seed(seed)"
]
| [
"0.62640226",
"0.626317",
"0.6150496",
"0.60767347",
"0.6075911",
"0.6051545",
"0.6035735",
"0.6022615",
"0.6013968",
"0.6004197",
"0.5934268",
"0.59313226",
"0.59139127",
"0.5891353",
"0.5879932",
"0.58529955",
"0.57775",
"0.5763144",
"0.57435983",
"0.5702521",
"0.56983835",
"0.567166",
"0.5666585",
"0.5665843",
"0.563339",
"0.56327754",
"0.5619918",
"0.56131685",
"0.5611192",
"0.5602149"
]
| 0.6335399 | 0 |
Create Tab and fill with content of default_script_content | def create_new_tab(default_script_content=""):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_tab(self):",
"def createDefaultTab(self):\n self.welcomePage = WelcomePage(self)\n tabId = self.tab.addTab(self.welcomePage, \"\" )\n self.tab.setTabIcon(tabId, QIcon(\":/main.png\") )\n\n self.welcomePage.LinkConnect.connect(self.onConnectLinkClicked)\n self.welcomePage.LinkDisconnect.connect(self.onDisconnectLinkClicked)\n self.welcomePage.LinkTux.connect(self.newTestUnit)\n self.welcomePage.LinkTsx.connect(self.newTestSuite)\n self.welcomePage.LinkTpx.connect(self.newTestPlan)\n self.welcomePage.LinkTgx.connect(self.newTestGlobal)\n self.welcomePage.LinkMacro.connect(self.onMacroLinkClicked)\n self.welcomePage.LinkBasicMacro.connect(self.onBasicMacroLinkClicked)\n self.welcomePage.LinkWebMacro.connect(self.onWebMacroLinkClicked)\n self.welcomePage.LinkMobileMacro.connect(self.onMobileMacroLinkClicked)\n self.welcomePage.OpenWeb.connect(self.onOpenWebsite)\n self.welcomePage.OpenProductWeb.connect(self.onOpenProductWebsite)\n self.welcomePage.LinkSysMacro.connect(self.onSysMacroLinkClicked)\n self.welcomePage.LinkPlugin.connect(self.onPluginLinkClicked)",
"def _create_data_tabs(self):\n self.tab_ctrl.tab.children = []\n for name, (ctrl_cls, args) in self._get_tab_definitions().items():\n ctrl = ctrl_cls(*args)\n # add to tabs\n self.tab_ctrl.add_tab(name, control=ctrl)\n # Set these controls as named attributes on the object\n setattr(self, name.replace(\" \", \"_\"), ctrl)",
"def new_tab(self, tab_name=None, tab_data={}):\n index = self.currentIndex()+1\n\n if (tab_name is None\n or not tab_name):\n tab_name = 'Tab {0}'.format(index)\n\n self.insertTab(index, tab_name)\n data = {\n 'uuid' : str(uuid.uuid4()),\n 'name' : tab_name,\n 'text' : '',\n 'path' : '',\n 'date' : '',\n 'saved' : False,\n }\n data.update(**tab_data)\n self.setTabData(index, data)\n self.setCurrentIndex(index)",
"def new_tab (self, url = None, key = None):\n # create the tab content\n wv = WV(key)\n #if url: wv.open(url)\n self._construct_tab_view(wv, url)\n return wv",
"def crearTabs (self):\n self.tabs_datos = self.mi_base.obtenerTabs()\n\n \"\"\" De los datos de tabs creo las solapas \"\"\" \n for indice in range(0,len(self.tabs_datos[\"idTabs\"])):\n self.tabs.append(myNotebook(self.tabs_datos[\"nombreTabs\"][indice],self.tabs_datos[\"pathIcono\"][indice],self.notebook,self.xScreen,self.yScreen,self.diccConfig))\n \n self.programas=[]\n self.programas = self.mi_base.obtenerProgramas(indice+1)\n \n if len(self.programas) > 0: \n \"\"\" ejecucion\n\t\t descripcion\n\t\t pathIcono\n\t\t\"\"\"\n for datos in range(0,len(self.programas[\"ejecucion\"])):\n self.tabs[indice].crearBotones(self.programas[\"pathIcono\"][datos],self.programas[\"descripcion\"][datos],self.programas[\"ejecucion\"][datos],self.onClickListener,self.programas[\"label\"][datos])\n \n \n\t\"\"\" Ejecuta el aplicativo y se oculta la interfaz \"\"\"",
"def createTabs(self):\r\n self.tab1 = QWidget()\r\n self.tab2 = QWidget()\r\n self.tab3 = QWidget()\r\n self.tab4 = QWidget()\r\n self.tab5 = QWidget()\r\n self.tab6 = QWidget()\r\n self.tab7 = QWidget()\r\n self.tab8 = QWidget()\r\n self.addTab(self.tab1, \"Registro\")\r\n self.addTab(self.tab2, \"Base de Datos\")\r\n self.addTab(self.tab3, \"Ingresos\")\r\n self.addTab(self.tab4, \"Compras\")\r\n self.addTab(self.tab5, \"Gastos\")\r\n self.addTab(self.tab6, \"Res. Diarios\")\r\n self.addTab(self.tab7, \"Res. Mensuales\")\r\n self.addTab(self.tab8, \"Res. Anuales\")",
"def __init__(self):\r\n super().__init__()\r\n self._setupTab1()",
"def new_tab(self, widget):\n print('new tab added')\n name = Gtk.Buildable.get_name(widget)\n if name == 'new':\n param = 'create'\n elif name == 'button_open':\n param = 'open'\n else:\n param = 'create'\n self.notebook.append_page(*self.create_tab(param))\n self.notebook.show_all()",
"def createNewTab(self, axisList, tabName):\n\n if (self.tabExists(tabName)):\n self.removeTab(self.getTabIndexFromName(tabName))\n\n if tabName == 'quickplot':\n self.insertTab(0, axisList, tabName) # quickplot is always first tab\n else:\n self.addTab(axisList, tabName)\n \n self.setTabTip(tabName)\n self.setCurrentIndex(self.getTabIndexFromName(tabName))",
"def populateScript(self):\n filePath = pm.fileDialog2(fileMode=1,\n startingDirectory=self.startDir,\n fileFilter=' Post Script .py (*%s)' % \".py\")\n if not filePath:\n return\n if not isinstance(filePath, string_types):\n filePath = filePath[0]\n self.gtUIInst.script_lineEdit.setText(filePath)",
"def newTestTxt(self):\n self.newTab( extension = TestTxt.TYPE, repoDest=UCI.REPO_UNDEFINED )",
"def create_tab(self, type):\n tab = Tabs(self.builder.get_object('window1'), type)\n label_widget = tab.get_label_widget()\n\n # connect label_widget's close button to close_tab()\n label_widget.get_children()[-1].connect('clicked', self.close_tab)\n label_widget.show_all()\n\n # set save, run, terminal button active if not\n save_button = self.builder.get_object('save')\n run_button = self.builder.get_object('run')\n terminal_button = self.builder.get_object('terminal')\n\n for button in [save_button, run_button, terminal_button]:\n button.set_sensitive(True)\n\n return tab, label_widget",
"def create(cls, *args: Any, **kwargs: Any) -> \"Tab\":",
"def extend_ui(self):\n for name, tab in self.build_general_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"General\", name, scroll)\n self.fill_tab(\"General\", name, tab)\n for name, tab in self.build_display_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"Display\", name, scroll)\n self.fill_tab(\"Display\", name, tab)\n for name, tab in self.build_data_tabs().items():\n scroll = self.build_tab()\n self.add_tab(\"Data\", name, scroll)\n self.fill_tab(\"Data\", name, tab)",
"def render_content(tab):\n # rander_holder = True\n for index in range(len(figs)):\n \"\"\"Render by start and callback.\"\"\"\n tabbi = f'tab-{index+1}'\n print(tabbi, 'in tabbi')\n if tab == tabbi:\n # returns the complete content for the browser\n return get_content(key_list[index], index)",
"def add_default_content(self):\n data = get_default_eventpage_data()\n\n for i, section in enumerate(data):\n section[\"position\"] = i\n section[\"content\"] = render_to_string(section[\"template\"])\n del section[\"template\"]\n self.content.create(**section)",
"def loadGenerateTab(index):\n\tgenPasswordScreen.show()\n\tgenPasswordNotebook.selectionBar.runTabCommand(index)",
"def newTab(self, path = None, filename = None, extension = None, remoteFile=False, contentFile=None, \n repoDest=None, newAdp=False, newLib=False, project=0, testDef=None, testExec=None,\n testInputs=None, testOutputs=None, testAgents=None, isReadOnly=False, isLocked=False,\n subtest_id=\"\"):\n if RCI.instance() is None: return\n \n if RCI.instance().authenticated: self.runAction.setEnabled(True)\n if RCI.instance().authenticated: self.runStepByStepAction.setEnabled(True)\n if RCI.instance().authenticated: self.runBreakpointAction.setEnabled(True)\n if RCI.instance().authenticated: self.runBackgroundAction.setEnabled(True)\n if RCI.instance().authenticated: self.checkSyntaxAction.setEnabled(True)\n if RCI.instance().authenticated: self.checkDesignAction.setEnabled(True)\n if RCI.instance().authenticated: self.updateTestAction.setEnabled(True)\n if RCI.instance().authenticated: self.runSchedAction.setEnabled(True)\n if RCI.instance().authenticated: self.checkAction.setEnabled(True)\n \n self.saveAsAction.setEnabled(True)\n self.codefoldingAction.setEnabled(True)\n self.codeWrappingAction.setEnabled(True)\n self.whitespaceVisibilityAction.setEnabled(True)\n self.indentGuidesVisibilityAction.setEnabled(True)\n self.closeTabAction.setEnabled(True)\n self.closeAllTabAction.setEnabled(True)\n \n # normalize extension to lower case\n extension = extension.lower()\n \n absPath = r'%s/%s.%s' % (path, filename, extension)\n if path is not None:\n if len(path) == 0:\n absPath = r'%s.%s' % (filename, extension)\n\n if extension == TestAdapter.TYPE or extension == TestLibrary.TYPE or extension == TestTxt.TYPE:\n cur_prj_id = 0\n else:\n cur_prj_id = project\n \n # new in v17\n nameLimit = Settings.instance().readValue( key = 'Editor/tab-name-limit' )\n nameLimit = int(nameLimit)\n # end of new\n \n tabId = self.checkAlreadyOpened(path = absPath, remoteFile=remoteFile, \n repoType=repoDest, project=cur_prj_id)\n if tabId is not None:\n self.tab.setCurrentIndex(tabId)\n \n # dbr13 >>> Find usage\n if extension in [TestPlan.TYPE, TestPlan.TYPE_GLOBAL]:\n doc = self.getCurrentDocument()\n doc.showFileUsageLine(line_id=subtest_id)\n # dbr13 <<<\n \n else:\n __error__ = False\n if extension == TestUnit.TYPE:\n doc = TestUnit.WTestUnit(self, path, filename, extension, self.nonameIdTs, \n remoteFile, repoDest, project, isLocked)\n if filename is None:\n doc.defaultLoad(testDef=testDef, testInputs=testInputs, \n testOutputs=testOutputs, testAgents=testAgents)\n doc.setModify()\n self.nonameIdTs += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, \n self.tr(\"Open Failed\") , \n self.tr(\"Corrupted Test Unit file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n\n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName)\n self.tab.setTabIcon(tabId, QIcon(\":/%s.png\" % TestUnit.TYPE) )\n self.setCloseButton(tabId=tabId, doc=doc)\n \n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestProperties/show-on-opening' ) ): \n self.ShowPropertiesTab.emit()\n \n doc.setFolding( self.codeFolding )\n doc.setIndentationGuidesVisible( self.indentationGuidesVisible )\n doc.setWhitespaceVisible( self.whitespaceVisible )\n 
doc.setLinesNumbering( self.linesNumbering )\n doc.foldAll()\n doc.setDefaultCursorPosition()\n \n elif extension == TestPng.TYPE:\n doc = TestPng.WTestPng(self, path, filename, extension, self.nonameIdTs, \n remoteFile, repoDest, project)\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted Png file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/png.png\") )\n self.setCloseButton(tabId=tabId, doc=doc)\n\n elif extension == TestSuite.TYPE:\n # self.findWidget.show()\n doc = TestSuite.WTestSuite(self, path, filename, extension, self.nonameIdTs, \n remoteFile, repoDest, project, isLocked)\n if filename is None:\n doc.defaultLoad(testDef=testDef, testExec=testExec, testInputs=testInputs, \n testOutputs=testOutputs, testAgents=testAgents)\n doc.setModify()\n self.nonameIdTs += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , \n self.tr(\"Corrupted Test Suite file\") )\n if not __error__:\n\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName)\n self.tab.setTabIcon(tabId, QIcon(\":/%s.png\" % TestSuite.TYPE) )\n self.setCloseButton(tabId=tabId, doc=doc)\n\n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestProperties/show-on-opening' ) ): \n self.ShowPropertiesTab.emit()\n \n doc.setFolding( self.codeFolding )\n doc.setIndentationGuidesVisible( self.indentationGuidesVisible )\n doc.setWhitespaceVisible( self.whitespaceVisible )\n doc.setLinesNumbering( self.linesNumbering )\n doc.foldAll()\n doc.setDefaultCursorPosition()\n \n elif extension == TestPlan.TYPE:\n doc = TestPlan.WTestPlan(self, path, filename, extension, self.nonameIdTp,remoteFile,repoDest, project, \n iRepo=self.iRepo, lRepo=self.lRepo, isLocked=isLocked )\n if filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTp += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted Test Plan file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/%s.png\" % TestPlan.TYPE) )\n self.setCloseButton(tabId=tabId, doc=doc)\n \n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestProperties/show-on-opening' ) ): \n self.ShowPropertiesTab.emit()\n\n elif 
extension == TestPlan.TYPE_GLOBAL:\n doc = TestPlan.WTestPlan(self, path, filename, extension, self.nonameIdTp,remoteFile,repoDest, project, \n iRepo=self.iRepo, testGlobal=True, lRepo=self.lRepo, isLocked=isLocked )\n if filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTp += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted Test Global file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/%s.png\" % TestPlan.TYPE_GLOBAL) )\n self.setCloseButton(tabId=tabId, doc=doc)\n \n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestProperties/show-on-opening' ) ): \n self.ShowPropertiesTab.emit()\n\n elif extension == TestConfig.TYPE:\n doc = TestConfig.WTestConfig(self, path, filename, extension, self.nonameIdTp,\n remoteFile,repoDest, project, isLocked)\n if filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTp += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted config file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/%s.png\" % TestConfig.TYPE) )\n self.setCloseButton(tabId=tabId, doc=doc)\n\n elif extension == TestAdapter.TYPE and newAdp:\n doc = TestAdapter.WTestAdapter(self, path, filename, extension, self.nonameIdTs,\n remoteFile, repoDest, project=0, isLocked=isLocked)\n\n if filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTs += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted adapter file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/file-adp2.png\") )\n self.setCloseButton(tabId=tabId, doc=doc)\n\n doc.setFolding( self.codeFolding )\n doc.setIndentationGuidesVisible( self.indentationGuidesVisible )\n doc.setWhitespaceVisible( self.whitespaceVisible )\n doc.setLinesNumbering( self.linesNumbering )\n doc.setDefaultCursorPosition()\n\n elif extension == TestLibrary.TYPE and newLib:\n # self.findWidget.show()\n doc = TestLibrary.WTestLibrary(self, path, filename, extension, self.nonameIdTs, \n remoteFile, repoDest, project=0, isLocked=isLocked)\n\n if 
filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTs += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted library file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/file-lib-adp.png\") )\n self.setCloseButton(tabId=tabId, doc=doc)\n\n doc.setFolding( self.codeFolding )\n doc.setIndentationGuidesVisible( self.indentationGuidesVisible )\n doc.setWhitespaceVisible( self.whitespaceVisible )\n doc.setLinesNumbering( self.linesNumbering )\n doc.setDefaultCursorPosition()\n\n elif extension == TestTxt.TYPE:\n # self.findWidget.show()\n doc = TestTxt.WTestTxt(self, path, filename, extension, self.nonameIdTs, remoteFile,\n repoDest, project=0, isLocked=isLocked)\n if filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTs += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted Txt file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n self.tab.setTabIcon(tabId, QIcon(\":/file-txt.png\") )\n self.setCloseButton(tabId=tabId, doc=doc)\n \n doc.setFolding( self.codeFolding )\n doc.setIndentationGuidesVisible( self.indentationGuidesVisible )\n doc.setWhitespaceVisible( self.whitespaceVisible )\n doc.setLinesNumbering( self.linesNumbering )\n doc.setDefaultCursorPosition()\n \n elif extension == TestData.TYPE:\n # self.findWidget.show()\n doc = TestData.WTestData(self, path, filename, extension, self.nonameIdTs, \n remoteFile, repoDest, project, isLocked)\n if filename is None:\n doc.defaultLoad()\n doc.setModify()\n self.nonameIdTs += 1\n else:\n self.BusyCursor.emit()\n res = doc.load(contentFile)\n\n # active xml lexer, depend of the value in data mode in the description of the test\n # new in 8.0.0\n if 'descriptions' in doc.dataModel.properties['properties']:\n dataMode = None\n for kv in doc.dataModel.properties['properties']['descriptions']['description']:\n if kv['key'] == 'data mode':\n dataMode = kv['value']\n if dataMode is not None:\n if dataMode.lower() == 'xml':\n doc.activeXmlLexer()\n # end in 8.0.0\n\n self.ArrowCursor.emit()\n if not res:\n __error__ = True\n del doc\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"Corrupted Test Data file\") )\n if not __error__:\n tabName = self.addTag( repoType=doc.repoDest, txt=doc.getShortName(), \n addSlash=False, project=doc.project )\n \n # new in v17\n if nameLimit == 0:\n _tabName = tabName\n else:\n if len(tabName) > nameLimit:\n _tabName = \"%s...\" % tabName[:nameLimit]\n else:\n _tabName = tabName\n # end of new in v17\n \n tabId = self.tab.addTab(doc, _tabName )\n 
self.tab.setTabIcon(tabId, QIcon(\":/%s.png\" % TestData.TYPE) )\n self.setCloseButton(tabId=tabId, doc=doc)\n \n self.ShowPropertiesTab.emit()\n \n doc.setFolding( self.codeFolding )\n doc.setIndentationGuidesVisible( self.indentationGuidesVisible )\n doc.setWhitespaceVisible( self.whitespaceVisible )\n doc.setLinesNumbering( self.linesNumbering )\n doc.setDefaultCursorPosition()\n \n else:\n self.error( \"extension unknown %s\" % extension )\n \n if not __error__: \n # set the current tab index\n self.tab.setCurrentIndex( tabId )\n # update tooltip of the tab\n self.tab.setTabToolTip( tabId, doc.getPath(absolute=True) )\n self.updateActions( wdocument=doc )\n self.findWidget.setEnabled(True)\n self.DocumentOpened.emit(doc)\n \n # dbr13 >>>> Find Usage functionality\n if extension in [TestPlan.TYPE, TestPlan.TYPE_GLOBAL]:\n doc.showFileUsageLine(line_id=subtest_id)\n # dbr13 <<<",
"def newTab(self, name, image =None):\n newT = imageTab(image) #create widget\n self.addTab(newT, name) #adds tab to tab widget\n self.totalTabs +=1 #increase total tabs count\n self.baseUI.objects[newT]= {} #add to objects dictionary\n return newT",
"def test_create(self):\n content = u'A content.'\n tab = widgets.StaticTab(\n id=u'id',\n title=u'Title',\n content=content)\n self.assertEquals(tab.contentFactory(), content)\n\n tab = widgets.StaticTab(\n id=u'id',\n title=u'Title',\n contentFactory=lambda: content * 2)\n self.assertEquals(tab.contentFactory(), content * 2)",
"def newTestSuiteWithContent (self, testDef=None, testExec=None, testInputs=None, \n testOutputs=None, testAgents=None):\n self.newTab( extension = TestSuite.TYPE, repoDest=UCI.REPO_UNDEFINED, testDef=testDef, \n testExec=testExec, testInputs=testInputs, \n testOutputs=testOutputs, testAgents=testAgents )",
"def new_tab_with_webview (self, webview):\n self.tabs._construct_tab_view(webview)",
"async def connect_to_tab(self) -> None:",
"def render_tab_content(active_tab):\r\n if active_tab is not None:\r\n if active_tab == \"Info\":\r\n return html.Div([html.P('We will remember coronavirus for a long time as our society got affected worldwide adapting to a new normal. It was a global pandemic causing transformations to the daily life. The World Health Organization declared a Public Health Emergency of International Concern regarding COVID-19 on 30 January 2020, and later declared a pandemic on March 2020. We have been in lockdown for more than a year and as off now, May 2021 most of the countries are offering doses of vaccines to their citizens. For the final project of MA705 class I wanted to show a dashboard with visualizations using python concepts to represent a summary of data and graphs for Covid-19 vaccination by manufacturer.'),dcc.Graph(figure=example_graph1, id='graph')])\r\n elif active_tab == \"USA\":\r\n return dcc.Graph(figure=example_graph2, id='graph') \r\n elif active_tab == \"Daily vaccinations\":\r\n return dcc.Graph(figure=example_graph3, id='graph')\r\n elif active_tab == \"Manufacturer\":\r\n return dcc.Graph(figure=example_graph4, id='graph')\r\n elif active_tab == \"Top 5\":\r\n return dcc.Graph(figure=example_graph5, id='graph') \r\n return \"No tab selected\"",
"def set_editor_contents(self, index):\n data = self.tabs.tabData(index)\n\n if not data:\n return # this will be a new empty tab, ignore.\n\n text = data['text']\n\n if text is None or not text.strip():\n path = data.get('path')\n if path is None:\n text = ''\n elif not os.path.isfile(path):\n text = ''\n else:\n with open(path, 'r') as f:\n text = f.read()\n data['text'] = text\n\n self.editor.setPlainText(text)\n\n if self.tabs.get('cursor_pos') is not None:\n cursor = self.editor.textCursor()\n pos = self.tabs['cursor_pos']\n cursor.setPosition(pos)\n self.editor.setTextCursor(cursor)\n\n if self.tabs.get('selection') is not None:\n # TODO: this won't restore a selection that\n # starts from below and selects upwards :( (yet)\n has, start, end = self.tabs['selection']\n if not has:\n return\n cursor = self.editor.textCursor()\n cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)\n cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)\n self.editor.setTextCursor(cursor)\n\n # for the autosave check_document_modified\n self.tab_switched_signal.emit()",
"def populateUI():\n \n # Main form layout\n form = cmds.formLayout()\n\n # Tab Layout\n tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)\n # Form attachment config\n cmds.formLayout( form, edit=True, attachForm=((tabs, 'top', 0), (tabs, 'left', 0), (tabs, 'bottom', 0), (tabs, 'right', 0)) )\n\n # The different Tabs on the window\n spawnTab = SpawnObjectsTab()\n roadTab = RoadRiverTab()\n environmentTab = EnvironmentTab()\n\n # Tab creation\n cmds.tabLayout( tabs, edit=True, tabLabel=((spawnTab, 'Spawn Buildings'), (roadTab, 'Create Roads / Rivers'), (environmentTab, \"Create Environment\") ))",
"def newTestUnitWithContent(self, testDef=None, testInputs=None, testOutputs=None, testAgents=None):\n self.newTab( extension = TestUnit.TYPE, repoDest=UCI.REPO_UNDEFINED, \n testDef=testDef, testInputs=testInputs,\n testOutputs=testOutputs, testAgents=testAgents )",
"def new_tab(self):\n \n self.driver.execute_script(\"window.open('about:blank');\")",
"def insert_tab(self, value=None):\n self.my_text.insert(INSERT, \" \" * 3)"
]
| [
"0.64131993",
"0.60441524",
"0.59892756",
"0.59824836",
"0.5936139",
"0.591632",
"0.5874173",
"0.586149",
"0.5849421",
"0.57874686",
"0.575115",
"0.5731017",
"0.5721766",
"0.57141954",
"0.5708622",
"0.5690848",
"0.56825405",
"0.5657574",
"0.5649704",
"0.56325084",
"0.56227934",
"0.5611232",
"0.55558044",
"0.55350035",
"0.5525444",
"0.55117136",
"0.5479843",
"0.5473163",
"0.54425806",
"0.54310566"
]
| 0.85935897 | 0 |
Return the discriminator object that is wrapped. Subclasses may not need to implement this method but can choose to if they are wrapping an object capable of discrimination. | def discriminator(self) -> Any:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discriminator(self) -> str:\n return self.__class__.__name__",
"def discriminator(self) -> Any:\r\n return self._qda",
"def get_real_child_model(self, data):\n discriminator_value = data[self.discriminator].lower()\n return self.discriminator_value_class_map.get(discriminator_value)",
"def discriminator(self) -> Any:\r\n return self._lda",
"def remotediscriminator(self) :\n\t\ttry :\n\t\t\treturn self._remotediscriminator\n\t\texcept Exception as e:\n\t\t\traise e",
"def discriminator(self, images): # pylint: disable=R0201\n return standard_discriminator(images)",
"def localdiscriminator(self) :\n\t\ttry :\n\t\t\treturn self._localdiscriminator\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def discriminator(self) -> str:",
"def discriminator (self) -> tf.keras.Sequential:\n return self._discriminator",
"def discriminator(self) -> undefined.UndefinedOr[str]:",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n from regula.documentreader.webclient.ext.models import RawAuthenticityCheckResultItem\n return self.discriminator_value_class_map.get(discriminator_value, RawAuthenticityCheckResultItem.__name__)",
"def discriminator(self, discriminator: str):\n pass # setter is ignored for discriminator property",
"def build_discriminator(self):\n\n def d_block(layer_input, filters, strides=1, bn=True):\n\n d = tf.keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n \n return d\n\n # Input img\n d0 = tf.keras.layers.Input(shape=self.hr_shape)\n\n d1 = d_block(d0, self.df, bn=False)\n d2 = d_block(d1, self.df, strides=2)\n d3 = d_block(d2, self.df)\n d4 = d_block(d3, self.df, strides=2)\n d5 = d_block(d4, self.df * 2)\n d6 = d_block(d5, self.df * 2, strides=2)\n d7 = d_block(d6, self.df * 2)\n d8 = d_block(d7, self.df * 2, strides=2)\n\n validity = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)\n\n return tf.keras.models.Model(d0, validity)",
"def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])",
"def get_discrinminator_trained_model(self):\n return Model(self.model.inputs[0], self.model.layers[2](self.model.layers[1](self.model.inputs[0])))",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> OnenoteEntityHierarchyModel:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n try:\n mapping_value = parse_node.get_child_node(\"@odata.type\").get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == \"#microsoft.graph.notebook\".casefold():\n from .notebook import Notebook\n\n return Notebook()\n if mapping_value and mapping_value.casefold() == \"#microsoft.graph.onenoteSection\".casefold():\n from .onenote_section import OnenoteSection\n\n return OnenoteSection()\n if mapping_value and mapping_value.casefold() == \"#microsoft.graph.sectionGroup\".casefold():\n from .section_group import SectionGroup\n\n return SectionGroup()\n return OnenoteEntityHierarchyModel()",
"def build_discriminator(self):\n with tf.variable_scope(\"discriminator\") as scope:\n\n # --- build the convolutional layers\n self.d_convlayers = list()\n mi = self.num_colors\n dim = self.img_dim\n count = 0\n for mo, filter_size, stride, apply_batch_norm in self.d_sizes['conv_layers']:\n name = f\"convlayer_{count}\" # name is used for get_variable later\n count += 1\n layer = ConvLayer(name, mi, mo, apply_batch_norm, filter_size, stride, lrelu)\n self.d_convlayers.append(layer)\n mi = mo\n print(f\"dim: {dim}\")\n # --- keep track of image dimensionality: need this for the first Dense layer\n dim = int(np.ceil(float(dim) / stride))\n\n # --- get the input dimensionalith for the first Dense layer\n mi = mi * dim * dim\n\n # --- build the dense layers\n self.d_denselayers = list()\n for mo, apply_batch_norm in self.d_sizes['dense_layers']:\n name = f\"denselayer_{count}\"\n count += 1\n layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)\n mi = mo\n self.d_denselayers.append(layer)\n\n # --- final logistic regression layer (use it in the d_forward\n # function below to get the final logits)\n name = f\"denselayer_{count}\"\n self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x: x)\n\n # --- get and return the logits\n logits = self.d_forward(self.X)\n return logits",
"def _discriminator(self, x, reuse=False):\n with tf.variable_scope(\"discriminator\", reuse=reuse) as scope:\n layer_1= tf.contrib.slim.fully_connected(inputs = x, num_outputs = 151, activation_fn = tf.nn.relu)\n layer_2 = tf.contrib.slim.fully_connected(inputs = layer_1, num_outputs = 71,activation_fn = tf.nn.relu)\n y = tf.contrib.slim.fully_connected(inputs = layer_2, num_outputs = 1,activation_fn = None)\n print('y shape', tf.shape(y))\n return y",
"async def get_composed_with_discriminator(self, **kwargs: Any) -> JSON:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[JSON] = kwargs.pop(\"cls\", None)\n\n request = build_polymorphism_get_composed_with_discriminator_request(\n headers=_headers,\n params=_params,\n )\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n if _stream:\n await response.read() # Load the body in memory and close the socket\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, cast(JSON, deserialized), {})\n\n return cast(JSON, deserialized)",
"def make_discriminator():\n constraint_shape = Params.environment.constraint_shape()\n solution_shape = Params.environment.solution_shape()\n joint_shape = constraint_shape[:]\n joint_shape[0] += solution_shape[0]\n\n constraint_input = placeholder_node(\"constraint_input\", constraint_shape, 1)\n solution_input = placeholder_node(\"solution_input\", solution_shape, 1)\n joint_input = tf.concat([constraint_input, solution_input], 1)\n return (\n constraint_input,\n solution_input,\n FeedforwardNetwork(\n name=\"artificial_discriminator\",\n session=Params.session,\n input_shape=joint_shape,\n layer_shapes=Params.internal_layer_shapes + [[1]],\n activations=Params.activation,\n input_node=joint_input,\n save_location=Params.save_location,\n ),\n )",
"def parent(self):\n return getattr(self, \"parent_%s\" % self.discriminator)",
"def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator",
"def get_class(self):\n return devices.get_class(self.type)",
"def base_dyadic(self):\n return self._base_instance",
"def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model",
"def build_discriminator(shape):\n input_img = Input(shape=(shape)) \n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Flatten()(x)\n o = Dense(1,activation='sigmoid')(x)\n Discriminator=Model(input_img,o,name='discriminator')\n return input_img,o,Discriminator"
]
| [
"0.7162278",
"0.7028762",
"0.6861705",
"0.6829797",
"0.68028903",
"0.67468226",
"0.6720729",
"0.6682106",
"0.6682106",
"0.6682106",
"0.6682106",
"0.65834624",
"0.6465254",
"0.6457462",
"0.6250308",
"0.6051711",
"0.58467674",
"0.5799867",
"0.5700982",
"0.5668812",
"0.5561934",
"0.556133",
"0.55470264",
"0.55230564",
"0.54798114",
"0.5477248",
"0.5393605",
"0.5378467",
"0.53648335",
"0.53330505"
]
| 0.72365224 | 0 |
Create a discriminator from the configuration. | def from_config(cls, config: Dict[str, Any]) -> "BaseDiscriminator": | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])",
"def make_discriminator():\n constraint_shape = Params.environment.constraint_shape()\n solution_shape = Params.environment.solution_shape()\n joint_shape = constraint_shape[:]\n joint_shape[0] += solution_shape[0]\n\n constraint_input = placeholder_node(\"constraint_input\", constraint_shape, 1)\n solution_input = placeholder_node(\"solution_input\", solution_shape, 1)\n joint_input = tf.concat([constraint_input, solution_input], 1)\n return (\n constraint_input,\n solution_input,\n FeedforwardNetwork(\n name=\"artificial_discriminator\",\n session=Params.session,\n input_shape=joint_shape,\n layer_shapes=Params.internal_layer_shapes + [[1]],\n activations=Params.activation,\n input_node=joint_input,\n save_location=Params.save_location,\n ),\n )",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> IosGeneralDeviceConfiguration:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return IosGeneralDeviceConfiguration()",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> MacOSGeneralDeviceConfiguration:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return MacOSGeneralDeviceConfiguration()",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> TargetedManagedAppConfiguration:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return TargetedManagedAppConfiguration()",
"def build_discriminator(shape):\n input_img = Input(shape=(shape)) \n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Flatten()(x)\n o = Dense(1,activation='sigmoid')(x)\n Discriminator=Model(input_img,o,name='discriminator')\n return input_img,o,Discriminator",
"def build_discriminator(self):\n\n def d_block(layer_input, filters, strides=1, bn=True):\n\n d = tf.keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n \n return d\n\n # Input img\n d0 = tf.keras.layers.Input(shape=self.hr_shape)\n\n d1 = d_block(d0, self.df, bn=False)\n d2 = d_block(d1, self.df, strides=2)\n d3 = d_block(d2, self.df)\n d4 = d_block(d3, self.df, strides=2)\n d5 = d_block(d4, self.df * 2)\n d6 = d_block(d5, self.df * 2, strides=2)\n d7 = d_block(d6, self.df * 2)\n d8 = d_block(d7, self.df * 2, strides=2)\n\n validity = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)\n\n return tf.keras.models.Model(d0, validity)",
"def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator",
"def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model",
"def discriminator(self, inpt, reuse, is_train):\n with tf.variable_scope(\"discriminator\"):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n net = conv2d(x=inpt, num_kernels=self.d_init, name=\"conv1\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*2, name=\"conv2\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*4, name=\"conv3\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*8, name=\"conv4\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = dense_layer(x=net, num_neurons=1, name=\"output\", activation=tf.identity, is_train=is_train,\n stddv=self.stddv)\n return net",
"def build_discriminator(self):\n with tf.variable_scope(\"discriminator\") as scope:\n\n # --- build the convolutional layers\n self.d_convlayers = list()\n mi = self.num_colors\n dim = self.img_dim\n count = 0\n for mo, filter_size, stride, apply_batch_norm in self.d_sizes['conv_layers']:\n name = f\"convlayer_{count}\" # name is used for get_variable later\n count += 1\n layer = ConvLayer(name, mi, mo, apply_batch_norm, filter_size, stride, lrelu)\n self.d_convlayers.append(layer)\n mi = mo\n print(f\"dim: {dim}\")\n # --- keep track of image dimensionality: need this for the first Dense layer\n dim = int(np.ceil(float(dim) / stride))\n\n # --- get the input dimensionalith for the first Dense layer\n mi = mi * dim * dim\n\n # --- build the dense layers\n self.d_denselayers = list()\n for mo, apply_batch_norm in self.d_sizes['dense_layers']:\n name = f\"denselayer_{count}\"\n count += 1\n layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)\n mi = mo\n self.d_denselayers.append(layer)\n\n # --- final logistic regression layer (use it in the d_forward\n # function below to get the final logits)\n name = f\"denselayer_{count}\"\n self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x: x)\n\n # --- get and return the logits\n logits = self.d_forward(self.X)\n return logits",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> Onenote:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return Onenote()",
"def discriminator(self, discriminator: str):\n pass # setter is ignored for discriminator property",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SchemaExtension:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return SchemaExtension()",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DeviceInstallState:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return DeviceInstallState()",
"def define_discriminator(sample_size, code_size, hidden_size=50):\r\n input_1 = Input(shape=(sample_size, ))\r\n input_2 = Input(shape=(code_size, ))\r\n inputs = concatenate([input_1, input_2])\r\n # Define the discriminator Layers\r\n d = Dense(hidden_size, kernel_initializer='he_uniform', activation='tanh')(inputs)\r\n out_classifier = Dense(1, kernel_initializer='he_uniform', activation=\"sigmoid\")(d)\r\n d_model = Model([input_1, input_2], out_classifier)\r\n d_model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0005, beta_1=0.5))\r\n return d_model",
"def discriminator(self, images): # pylint: disable=R0201\n return standard_discriminator(images)",
"def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator",
"def build_discriminator():\n\n #Slope and weight initializer are chosen to match parmeters in the paper\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.2\n inputs = keras.Input(shape=(64,64,3))\n x = preprocessing.Rescaling(scale=1./127.5, offset=-1.)(inputs)\n\n # First conv layer\n x = Conv2D(\n 64,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Second conv layer\n x = Conv2D(\n 128,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third conv layer\n x = Conv2D(\n 256,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth conv layer\n x = Conv2D(\n 512,\n 4,\n 2,\n padding='same',\n use_bias=False,\n kernel_initializer=weight_initializer\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Predictions. Note that we use logits so thhere is no activation at the end. \n x = layers.Flatten()(x)\n x = layers.Dense(1,kernel_initializer=weight_initializer)(x)\n \n model = keras.Model(inputs=inputs, outputs=x)\n return model",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DelegatedAdminRelationshipRequest:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return DelegatedAdminRelationshipRequest()",
"def define_discriminator(image_shape=(256, 256, 1)):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # source image input\n in_src_image = Input(shape=image_shape)\n # target image input\n in_target_image = Input(shape=image_shape)\n # concatenate images channel-wise\n merged = Concatenate()([in_src_image, in_target_image])\n # C64\n d = Conv2D(64, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(merged)\n d = LeakyReLU(alpha=0.2)(d)\n # C128\n d = Conv2D(128, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C256\n d = Conv2D(256, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C512\n d = Conv2D(512, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # second last output layer\n d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # patch output\n d = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)\n patch_out = Activation('sigmoid')(d)\n # define model\n model = Model([in_src_image, in_target_image], patch_out)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt,\n loss_weights=[0.5])\n\n return model",
"def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> RequestorManager:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return RequestorManager()",
"def discriminator(self) -> str:",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SynchronizationRule:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return SynchronizationRule()",
"def discriminator (self) -> tf.keras.Sequential:\n return self._discriminator",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> TeamsAppDefinition:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return TeamsAppDefinition()",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> Win32LobAppProductCodeRule:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return Win32LobAppProductCodeRule()",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> OnenoteSection:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return OnenoteSection()",
"def __init__(self):\n super(Discriminator, self).__init__()\n\n # Use stride in convolutions to downsample image to size 1\n\n # Using BatchNorm2d 0.8 for stability based on reading of https://github.com/eriklindernoren/PyTorch-GAN code\n layers = [nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=True),\n nn.Flatten(), nn.Sigmoid()]\n for i in range(3):\n out_chans = int(512 / (2 ** i))\n in_chans = int(out_chans / 2)\n layers.insert(0, nn.LeakyReLU(0.2, inplace=True))\n layers.insert(0, nn.BatchNorm2d(out_chans, 0.8))\n layers.insert(0, nn.Conv2d(in_chans, out_chans, kernel_size=4, stride=2, padding=1, bias=False))\n layers.insert(0, nn.LeakyReLU(0.2, inplace=True))\n layers.insert(0, nn.BatchNorm2d(64, 0.8))\n layers.insert(0, nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False))\n print(layers)\n self.network = nn.Sequential(*layers)"
]
| [
"0.7203782",
"0.71097237",
"0.7004021",
"0.69149435",
"0.6872702",
"0.6715182",
"0.6653551",
"0.6575609",
"0.6497181",
"0.6439283",
"0.64347684",
"0.6390408",
"0.6352338",
"0.63269067",
"0.62924856",
"0.6271239",
"0.62504613",
"0.62000203",
"0.6198145",
"0.6189163",
"0.6158901",
"0.61503166",
"0.6147955",
"0.61343503",
"0.6112828",
"0.6110136",
"0.60753685",
"0.6064791",
"0.604252",
"0.6040981"
]
| 0.7627585 | 0 |
Gets a hash code for the current DateTime instance. | def __hash__(self):
return hash(self.date) ^ hash(self.time_of_day) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __hash__(self):\n return int(((self._year % 100 * 12 + self._month) * 31 +\n self._day + self.time) * 100)",
"def _hashDateTime(self):\r\n i = 0\r\n for tp in self._listTimePoints:\r\n self._dateTimeHash[tp.getDateTime()] = i\r\n i += 1",
"def __hash__(self):\n return hash((self.century, self.year, self.month, self.week, self.day))",
"def __hash__(self):\n return hash(self.starttime)",
"def __hash__(self):\n return hash(\n (self.hour,\n self.minute,\n self.second,\n self.GetZoneOffset()))",
"def hash(self):\n return Hash.dhash(bytes(self))",
"def hash(self):\n return self.__hash__()",
"def __hash__(self):\n\n return int(self._hash_value_)",
"def get_hash(self):\n return self.__hash",
"def __hash__(self):\n return hash(self.semitone_interval)",
"def __hash__(self):\n return hash(self.get_canonical_identifier())",
"def get_hash(self) -> str:\n return self.__hash.hexdigest()",
"def hash(self):\n return self._hash",
"def __hash__(self):\n return hash((self.date_start, self.date_end, self.type_event))",
"def __hash__(self):\n return hash(str(self.__id__))",
"def get_hash(self):\r\n return",
"def hash_method(self):\n return self._hash_class",
"def __hash__(self):\n return self.to_hash()",
"def _hash(self):\r\n MAX = sys.maxint\r\n MASK = 2 * MAX + 1\r\n n = len(self)\r\n h = 1927868237 * (n + 1)\r\n h &= MASK\r\n for x in self:\r\n hx = hash(x)\r\n h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167\r\n h &= MASK\r\n h = h * 69069 + 907133923\r\n h &= MASK\r\n if h > MAX:\r\n h -= MASK + 1\r\n if h == -1:\r\n h = 590923713\r\n return h",
"def __hash__(self):\n return hash(self.hash)",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def __Hash(self):\n return self._Hash()",
"def __hash__(self):\n return hash(str(self))",
"def __hash__(self):\n return hash(str(self))",
"def __hash__(self):\n return hash(str(self))",
"def __hash__(self):\n return hash(id(self))",
"def __hash__(self):\n return hash(id(self))",
"def __hash__(self) -> int:\n return hash(self.__key__())",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def current_hash(self):"
]
| [
"0.7116641",
"0.70797104",
"0.7021982",
"0.6897718",
"0.6678579",
"0.64955324",
"0.6468827",
"0.6436917",
"0.6384239",
"0.6383091",
"0.6382933",
"0.63715667",
"0.6305246",
"0.6295998",
"0.62853426",
"0.62744904",
"0.62410605",
"0.6240619",
"0.621996",
"0.6212789",
"0.62097067",
"0.6185434",
"0.6181819",
"0.6181819",
"0.6181819",
"0.6168902",
"0.6168902",
"0.613269",
"0.6130061",
"0.61167353"
]
| 0.741404 | 0 |
Finds out if two 'DateTime' instances are not equal. | def __ne__(self, Other):
return self.date != Other.date or self.time_of_day != Other.time_of_day | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def almost_same_datetime(dt1, dt2, allowed_delta=timedelta(minutes=1)):\r\n return abs(dt1 - dt2) < allowed_delta",
"def compare_datetime(self_datetime, other_datetime):\n # pylint: disable=superfluous-parens\n if (isinstance(self_datetime and other_datetime, (datetime, type(None)))):\n return (\n (self_datetime == other_datetime\n if all(str(_.time()) != \"00:00:00\"\n for _ in [self_datetime, other_datetime])\n else self_datetime.date() == other_datetime.date())\n if self_datetime and other_datetime\n else self_datetime == other_datetime)\n else:\n Representation.attrs_values_types_error(\n self_attr=self_datetime, other_attr=other_datetime,\n expected_types=(datetime.__name__, type(None).__name__))",
"def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))",
"def __eq__(self, t):\n if not isinstance(t, DateTime):\n return False\n return (self._micros, self._tz) == (t._micros, t._tz)",
"def test_date1_equal_date2(self):\n date1 = datetime.date(2014, 11, 29)\n date2 = datetime.date(2014, 11, 29)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def __ne__(self, other):\n if not isinstance(other, Todays):\n return True\n\n return self.to_dict() != other.to_dict()",
"def assertDateEqual(self, date1, date2):\n date1 = date1.replace(microsecond=0)\n date2 = date2.replace(microsecond=0)\n self.assertEqual(date1, date2)",
"def check_dt_consistency(date_dt):\n\n # https://en.wikipedia.org/wiki/Tz_database\n # https://www.iana.org/time-zones\n \n if date_dt.tzinfo is None:\n return True\n else:\n \n # This check is quite heavy but there is apparently no other way to do it.\n if date_dt.utcoffset() != dt_from_s(s_from_dt(date_dt), tz=date_dt.tzinfo).utcoffset():\n return False\n else:\n return True",
"def assertEqualDates(self, dt1, dt2, seconds=None):\n if seconds is None:\n seconds = self.date_tolerance\n\n if dt1 > dt2:\n diff = dt1 - dt2\n else:\n diff = dt2 - dt1\n if not diff < datetime.timedelta(seconds=seconds):\n raise AssertionError('%r and %r are not within %r seconds.' %\n (dt1, dt2, seconds))",
"def test_equal_inputs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = rhs = datetime(2012, 9, 20, 2, 59)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)\n self.assertIs(lhs, result)",
"def compare_dates(dt1, dt2):\n return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day",
"def _assert_not_series_equal_both(a, b, **kwargs):\n _assert_not_series_equal(a, b, **kwargs)\n _assert_not_series_equal(b, a, **kwargs)",
"def is_outdated(self):\n today = datetime.datetime.today()\n day = datetime.datetime.combine(self.date, self.start_time)\n return day <= today",
"def are_not_equal(value1, value2):\n return not ObjectComparator.are_equal(value1, value2)",
"def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)",
"def _assert_not_series_equal(a, b, **kwargs):\n try:\n tm.assert_series_equal(a, b, **kwargs)\n msg = \"The two Series were equal when they shouldn't have been\"\n\n pytest.fail(msg=msg)\n except AssertionError:\n pass",
"def assert_not_equal(self, first, second, msg=\"\"):\r\n assert first != second",
"def assert_not_equal(self, first, second):\n if not first != second:\n raise AssertionError('%s and %s is equal' % (str(first), str(second)))",
"def __ne__(self, other):\n if not isinstance(other, ScheduleUpdate):\n return True\n\n return self.to_dict() != other.to_dict()",
"def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = dt[1]\r\n except:\r\n self.dt = 0.0",
"def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())",
"def __ne__(self, other):\n return self.dtype != other.dtype",
"def is_datetime(self) -> bool:\n return False",
"def __ne__(self, other):\n if not isinstance(other, ScheduleH4):\n return True\n\n return self.to_dict() != other.to_dict()",
"def __ne__(self, other):\n if (self.timestamp != other.timestamp) and (self.hash != other.hash):\n return True\n\n else:\n return False",
"def __eq__(self, Other):\n return self.date == Other.date and self.time_of_day == Other.time_of_day",
"def __eq__(self, other):\n if self.day == other.day and self.month == other.month and self.year == other.year:\n return True\n else:\n return False",
"def __ne__(self, other):\n if not isinstance(other, ValuationSchedule):\n return True\n\n return self.to_dict() != other.to_dict()",
"def test_larger_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 2, 59)\n rhs = datetime(2012, 9, 20, 3, 00)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)",
"def test_date2_lower_date1(self):\n date1 = datetime.date(2019, 5, 2)\n date2 = datetime.date(2019, 5, 1)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))"
]
| [
"0.7060995",
"0.7005772",
"0.69751614",
"0.67829657",
"0.67326754",
"0.67311144",
"0.66667795",
"0.66145265",
"0.6613488",
"0.6596073",
"0.6548462",
"0.64864844",
"0.6370382",
"0.6360114",
"0.6351058",
"0.625587",
"0.61844254",
"0.61789215",
"0.6171636",
"0.6166754",
"0.61339045",
"0.60925704",
"0.60745394",
"0.6066214",
"0.6057168",
"0.605532",
"0.6039756",
"0.60084665",
"0.5996526",
"0.59863573"
]
| 0.70775914 | 0 |
Gets this timestamp's time of day. | def time_of_day(self):
return self.time_of_day_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def day_ts(self):\n return self.raw() // (60 * 24)",
"def get_time_of_the_day(self, ts):\n h, m, s = ts.hour, ts.minute, ts.second\n # Convert the hours, minutes, and seconds to seconds: referenced to 0 AM\n t = int(h) * 3600 + int(m) * 60 + int(s)\n if t >= 0:\n return t\n else:\n return t + 24*3600",
"def gettime(self):\n return self.t",
"def get_time(self):\n return self.time",
"def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return",
"def get_time_of_day(self, time_stamp):\n hour = datetime.strptime(time_stamp, self.fmt).hour\n \n if hour < 12:\n return 'morning'\n elif hour > 18:\n return 'evening'\n else:\n return 'afternoon'",
"def get_time(self):\n return self.__time",
"def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return",
"def time_of_the_day(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"time_of_the_day\")",
"def get_time(self):\n return self._time",
"def get_time(self):\n return self._time",
"def get_time(self):\n return self._time",
"def get_time_date(self):\n return time.strftime(\"%m-%d-%Y %H:%M\")",
"def time(self):\n return self.raw() % (60 * 24)",
"def _clock_day(self):\n return int(self._shifted_time / 86400)",
"def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim",
"def day(self):\n return self._day",
"def day(self):\n return self._day",
"def day(self):\n return self._day",
"def get_time(self):\n return self._current_time",
"def get_time(self):\n return self._current_time",
"def get_time(self):\n return self._current_time_sec",
"def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None",
"def get_time(self):\n return time.strftime(\"%d/%m/%y %M:%H:%S\", self.time)",
"async def get_time(self) -> DateTime:\n return await DateTime.get(self._api)",
"async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()",
"def event_date_time(self) -> EventDateTime:\n return self._event_date_time",
"def get_timestamp(self, days=1):\n offset = datetime.datetime.utcnow().date() - datetime.timedelta(days=days-1)\n # est = tz.gettz('Europe/Amsterdam')\n # temporary dirty fix for timezone:\n timezone = '+02:00'\n start = datetime.datetime(offset.year, offset.month, offset.day)\n return start.isoformat() + timezone",
"def Day(self):\n return self._fday",
"def day(self) -> int:\r\n return self._day"
]
| [
"0.7439932",
"0.74265647",
"0.69092965",
"0.6888736",
"0.68819934",
"0.6868406",
"0.68665147",
"0.68561435",
"0.68515694",
"0.684759",
"0.684759",
"0.684759",
"0.6750747",
"0.6740334",
"0.67259836",
"0.67207116",
"0.671258",
"0.671258",
"0.671258",
"0.67064935",
"0.67064935",
"0.66225463",
"0.66221786",
"0.6572463",
"0.6570535",
"0.65625244",
"0.6447272",
"0.63905054",
"0.63895094",
"0.63838065"
]
| 0.8198794 | 0 |
Sets this timestamp's time of day. This accessor is private. | def time_of_day(self, value):
self.time_of_day_value = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_time(self, time):\n self._time = time",
"def setTime(self, timeObj, day=None):\n\n # override day if it's None\n if not day:\n day = getDayFromNum(timeObj.weekday())\n\n self._fileCache[day][\"time-hr\"] = timeObj.hour\n self._fileCache[day][\"time-min\"] = timeObj.minute\n self._updateConfig()",
"def set_time(self, set_time):\n\n self._set_time = set_time",
"def time_of_day(self, time_of_day):\n if time_of_day is None:\n raise ValueError(\"Invalid value for `time_of_day`, must not be `None`\") # noqa: E501\n\n self._time_of_day = time_of_day",
"def _set_timestamp(self):\n d = datetime.now()\n self._time_stamp = \"{:>2} {} {} {:>2}:{:>02}\".format(\n d.day, MONTH_ABBREV[d.month], d.year, d.hour, d.minute)",
"def day(self, day):\n\n self._day = day",
"def day(self, day):\n\n self._day = day",
"def setHour(self, *args):\n return _libsbml.Date_setHour(self, *args)",
"def setDay(self, *args):\n return _libsbml.Date_setDay(self, *args)",
"def change_time(self, new_time):\r\n self.when = new_time",
"def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()",
"def time_of_day(self):\n return self.time_of_day_value",
"def set_time(self, value: float):\n raise NotImplementedError()",
"def set_update_date(self, now=None, hour=None, minute=None):\n # if 1:00 < 3:00 AM\n if now < now.replace(hour=hour, minute=minute):\n # update_date = 3:00 18.01.2020\n self.update_date = now.replace(hour=hour, minute=minute)\n else:\n # update_date = 3:00 19.01.2020\n self.update_date = (now + timedelta(days=1)).replace(hour=hour, minute=minute)\n logging.info(\"Update date time: {}\".format(self.update_date), extra=self.extra)\n\n return self.update_date",
"def setTime(self,time):\n self.time = time",
"def setTestTime(self, timestamp):\n self._test_time = timestamp",
"def set_time_datum(self, new_datum=None):\n if new_datum is None:\n dt = _time.TimeDelta(self['t'].min(), format=self['t'].unit.to_string())\n new_datum = self.time_datum + dt\n else:\n dt = new_datum - self.time_datum\n dt = dt.to(self['t'].unit).value\n\n # ensure appropriate precision is maintained\n ddt = _np.diff(self['t'])\n dtmin = ddt[ddt > 0].min()\n max_bit = _np.log2(self['t'][-1] + abs(dt))\n min_bit = _np.log2(dtmin)\n need_bits = _np.ceil(max_bit - min_bit) + 3\n need_bytes = _np.ceil(need_bits/8.)\n if need_bytes > 8:\n raise ValueError('Resetting the time atum of this observation by {} {} will result in loss of numerical '\n 'precision of the photon arrival times.'.format(dt, self['t'].unit))\n use_bytes = have_bytes = int(self['t'].dtype.str[-1])\n while need_bytes > use_bytes and use_bytes < 8:\n use_bytes *= 2\n if use_bytes != have_bytes:\n new_dtype = 'f' + str(use_bytes)\n self['t'] = self['t'].astype(new_dtype)\n\n self['t'] -= dt\n self.time_datum = new_datum\n self.obs_times = [t - dt for t in self.obs_times]",
"def set_imeastime(self, time):\n self.itime = time",
"def set_time(self, timestamp):\n\n\t\tdata = pack(\"!bL\", 2, timestamp)\n\t\tself._send_message(\"TIME\", data)",
"def date_time(self, date_time):\n\n self._date_time = date_time",
"def update_timestamp(self):\n self._timestamp = datetime.datetime.now()",
"def setTimepoint(self, tp):\n\t\tpass",
"def valkkafsmanager_set_time_cb(self, t):\n self.signals.set_time.emit(t)",
"def set_datetime(self, date):\n self.date = date",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def time(self, time):\n\n self._time = time",
"def set_time(self, time):\n with self.loopback_guard('time'):\n self.widget().setTime(time)"
]
| [
"0.67705643",
"0.65735674",
"0.6559325",
"0.6531947",
"0.63953936",
"0.6381746",
"0.6381746",
"0.6374305",
"0.6324803",
"0.6319383",
"0.62798166",
"0.6268972",
"0.62550294",
"0.62300986",
"0.6218603",
"0.62182754",
"0.62113154",
"0.62104666",
"0.6195025",
"0.6189597",
"0.6188849",
"0.61720645",
"0.6143365",
"0.61099476",
"0.6087479",
"0.6087479",
"0.6087479",
"0.6087479",
"0.6087479",
"0.60577214"
]
| 0.7851839 | 0 |
Params ====== input_count = number of inputs node_count = number of nodes in the layer activations = activations for each node | def __init__(self, input_count, node_count, activations=[]):
self.input_count = input_count
self.node_count = node_count
# If no activations are passed, generate them randomly.
if (len(activations) == 0):
rand_activations = [random.randint(0, self.node_count) for i in range(self.node_count)]
self.activations = np.asarray(rand_activations)
else:
self.activations = np.asarray(activations) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def activate(self, inputs, max_iters=10, verbose=False):\n # Node activation values\n self.node_vals = np.zeros(self.A.shape[0])\n\n # Bool to check if node was activated in the current time step\n self.active_nodes = np.zeros((self.A.shape[0]), dtype=bool)\n\n # Label inputs and bias as active\n self.active_nodes[self.inputs] = True\n self.active_nodes[self.bias] = True\n\n # While some output nodes are inactive, pass the signal farther\n # through the network\n\n i=0\n # while not self.active_nodes[self.outputs].all():\n while True:\n\n # Activate inputs\n # NOTE: This step disallows recurrent connections between hidden and input nodes\n\n self.node_vals[self.inputs] = inputs\n self.node_vals[self.bias] = 1.\n\n # Drive the activations one time step farther through the network\n self.node_vals = self.A.dot(self.node_vals)\n\n # Keep track of new node activations\n self.active_nodes = (self.A != 0).dot(self.active_nodes) + self.active_nodes\n\n # Apply sigmoid to active nodes\n self.node_vals[self.active_nodes] = self.sigmoid(self.node_vals[self.active_nodes])\n if verbose:\n print(self.node_vals)\n\n\n\n i += 1\n # Stop if the number of iterations exceeds max_iters\n if i > max_iters:\n break\n #\n # return np.array([np.nan]*len(self.outputs))\n\n return self.node_vals[self.outputs]",
"def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)",
"def nn_layer(input_tensor, input_dim, output_dim, act=tf.nn.relu):\n\n with tf.device('/device:SYCL:0'):\n weights = weight_variable([input_dim, output_dim])\n biases = bias_variable([output_dim])\n preactivate = tf.matmul(input_tensor, weights) + biases\n activations = act(preactivate, name='activation')\n return activations",
"def process_layer(layer_def, inputs):\n\n outputs = []\n for n in layer_def['neurons']:\n n_res = n.activate(inputs)\n\n outputs.append(n_res)\n\n return outputs",
"def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - .25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])",
"def activate(self, inputs):\n # Calculate values of hidden nodes\n hidden_values = []\n for i in range(self.hidden_layer_size):\n hidden_node_value = 0\n bias_weight = self.bias_weights[i]\n hidden_node_value += bias_weight\n for j in range(self.input_values):\n weight = self.input_to_hidden_layer_weights[i][j]\n hidden_node_value += inputs[j] * weight\n\n # ReLU activation function\n hidden_node_value = max(hidden_node_value, 0)\n\n hidden_values.append(hidden_node_value)\n\n # Calculate output value\n output_value = 0\n for i in range(self.hidden_layer_size):\n output_value += hidden_values[i] * \\\n self.hidden_to_output_layer_weights[i]\n\n return output_value",
"def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu,method = \"xavier\"):\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim],method = method,name = layer_name)\n variable_summaries(weights, layer_name + '/weights')\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights)\n tf.histogram_summary(layer_name + '/pre_activations', preactivate)\n if act is None:\n activations = preactivate\n else:\n activations = act(preactivate, 'activation')\n tf.histogram_summary(layer_name + '/activations', activations)\n return activations",
"def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\r\n # Adding a name scope ensures logical grouping of the layers in the graph.\r\n with tf.name_scope(layer_name):\r\n # This Variable will hold the state of the weights for the layer\r\n with tf.name_scope('weights'):\r\n weights = weight_variable([input_dim, output_dim])\r\n variable_summaries(weights)\r\n with tf.name_scope('biases'):\r\n biases = bias_variable([output_dim])\r\n variable_summaries(biases)\r\n with tf.name_scope('Wx_plus_b'):\r\n preactivate = tf.matmul(input_tensor, weights) + biases\r\n tf.summary.histogram('pre_activations', preactivate)\r\n activations = act(preactivate, name='activation')\r\n tf.summary.histogram('activations', activations)\r\n return activations",
"def agent(item_count = env.N):\n\n input_layer = keras.Input((item_count * 3,))\n hidden_layer = keras.layers.Dense(item_count, activation=\"sigmoid\", name=\"Hidden\")(input_layer)\n hidden_layer_2 = keras.layers.Dense(item_count * 3, activation=\"relu\", name=\"Hidden_2\")(hidden_layer)\n output_layer = keras.layers.Dense(item_count, activation=\"sigmoid\", name=\"Output\")(hidden_layer_2)\n model = keras.Model(inputs=[input_layer], outputs=[output_layer])\n model.compile(\"adam\",\n tf.keras.losses.binary_crossentropy,\n metrics = [\"categorical_accuracy\"] )\n \n return model",
"def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n variable_summaries(weights)\n with tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n variable_summaries(biases)\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n tf.summary.histogram('pre_activations', preactivate)\n activations = act(preactivate, name=\"activation\")\n tf.summary.histogram('activations', activations)\n return activations",
"def forward(self, nodes_batch):\n lower_layer_nodes = list(nodes_batch) # node idx\n\n nodes_batch_layers = [(lower_layer_nodes,)]\n\n for i in range(self.num_layers):\n lower_layer_neighs, lower_layer_nodes = self._get_unique_neighs_list(lower_layer_nodes,\n num_sample=self.num_neighbor_samples)\n # lower_layer_neighs: list(list())\n # lower_layer_nodes: list(nodes of next layer)\n nodes_batch_layers.insert(0, (lower_layer_nodes, lower_layer_neighs))\n\n all_nodes = np.unique([int(n) for n in list(chain(*[layer[0] for layer in nodes_batch_layers]))])\n all_nodes_idx = dict([(node, idx) for idx, node in enumerate(all_nodes)])\n\n all_neigh_nodes = pad_sequence([self.nodes_tokenized[node] for node in all_nodes], padding_value=1).transpose(0,\n 1)[\n :, :MAX_SEQ_LENGTH].to(self.device)\n\n pre_hidden_embs = self.get_roberta_embs(\n all_neigh_nodes\n )\n\n # (num_all_node, emb_size)\n\n for layer_idx in range(1, self.num_layers + 1):\n this_layer_nodes = nodes_batch_layers[layer_idx][0] # all nodes in this layer\n neigh_nodes, neighbors_list = nodes_batch_layers[layer_idx - 1] # previous layer\n # list(), list(list())\n\n aggregate_feats = self.aggregate(neighbors_list, pre_hidden_embs, all_nodes_idx)\n # (this_layer_nodes_num, emb_size)\n\n sage_layer = getattr(self, 'sage_layer' + str(layer_idx))\n\n cur_hidden_embs = sage_layer(self_feats=pre_hidden_embs[[all_nodes_idx[int(n)] for n in this_layer_nodes]],\n # pre_hidden_embs[layer_nodes],\n aggregate_feats=aggregate_feats)\n\n # cur_hidden_embs = torch.cat([pre_hidden_embs[[all_nodes_idx[int(n)] for n in this_layer_nodes]].unsqueeze(1), \n # aggregate_feats.unsqueeze(1)], dim=1) # (b_s, 2, emb_size)\n # cur_hidden_embs = torch.mean(cur_hidden_embs, dim=1)\n\n pre_hidden_embs[[all_nodes_idx[int(n)] for n in this_layer_nodes]] = cur_hidden_embs\n\n # (input_batch_node_size, emb_size)\n # output the embeddings of the input nodes\n return pre_hidden_embs[[all_nodes_idx[int(n)] for n in nodes_batch]]",
"def trainNet():",
"def __init__(self, input_size, nb_action):\r\n super(Network, self).__init__()\r\n self.input_size = input_size\r\n self.nb_action = nb_action\r\n \r\n #Connection with input layer and hidden layer\r\n self.fc1 = nn.Linear(input_size, 30)\r\n #Connection with hidden layer and output layer\r\n self.fc2 = nn.Linear(30, nb_action)",
"def apply_neurons(self):\n for neuron in range(self.n_outputs):\n self.uf_activate(neuron)",
"def __init__(self, dims, act = 'relu', random_seed = 201809, splitseed = 215, optimizer = Adam(),\n weights_dir = 'CarDEC Count Weights', n_features = 32, mode = 'HVG'):\n \n super(count_model, self).__init__()\n\n tf.keras.backend.clear_session()\n \n self.mode = mode\n self.name_ = mode + \" Count\"\n \n if mode == 'HVG':\n self.embed_name = 'embedding'\n else:\n self.embed_name = 'LVG embedding'\n \n self.weights_dir = weights_dir\n \n self.dims = dims\n n_stacks = len(dims) - 1\n \n self.optimizer = optimizer\n self.random_seed = random_seed\n self.splitseed = splitseed\n \n random.seed(random_seed)\n np.random.seed(random_seed)\n tf.random.set_seed(random_seed)\n \n self.activation = act\n self.MeanAct = lambda x: tf.clip_by_value(tf_exp(x), 1e-5, 1e6)\n self.DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)\n \n model_layers = []\n for i in range(n_stacks - 1, 0, -1):\n model_layers.append(Dense(dims[i], kernel_initializer = \"glorot_uniform\", activation = self.activation\n , name='base%d' % (i-1)))\n self.base = Sequential(model_layers, name = 'base')\n\n self.mean_layer = Dense(dims[0], activation = self.MeanAct, name='mean')\n self.disp_layer = Dense(dims[0], activation = self.DispAct, name='dispersion')\n\n self.rescale = Lambda(lambda l: tf.matmul(tf.linalg.diag(l[0]), l[1]), name = 'sf scaling')\n \n build_dir(self.weights_dir)\n \n self.construct(n_features, self.name_)",
"def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n with tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n activations = act(preactivate, 'activation')\n return activations",
"def forward_propagate(self, inputs):\n\n activations = inputs\n self.activations[0] = inputs\n\n for i, w in enumerate(self.weights):\n # Calculate the net inputs\n net_inputs = np.dot(activations, w)\n\n # Calculate the activations\n activations = self._sigmoid(net_inputs)\n self.activations[i+1] = activations\n\n return activations",
"def __init__(self, N_sym, n_nodes, activations, N_element, bias = True, scaling = None):\n super(MultiLayerNet, self).__init__()\n N_layers = len(n_nodes)\n if N_layers == 0:\n self.net = torch.nn.Linear(N_sym, N_element, bias = bias)\n else:\n layers = []\n for n in range(N_layers):\n if n == 0:\n layers += [torch.nn.Linear(N_sym, n_nodes[n], bias = bias)]\n layers += [activations[n]]\n else:\n layers += [torch.nn.Linear(n_nodes[n-1], n_nodes[n], bias = bias)]\n layers += [activations[n]]\n layers += [torch.nn.Linear(n_nodes[-1], N_element, bias = bias)]\n self.net = torch.nn.Sequential(*layers)\n \n self.scaling = scaling",
"def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):\n\n self.num_inputs = num_inputs\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n\n # create a generic representation of the layers\n layers = [num_inputs] + hidden_layers + [num_outputs]\n\n # create random connection weights for the layers\n weights = []\n for i in range(len(layers) - 1):\n w = np.random.rand(layers[i], layers[i + 1])\n weights.append(w)\n self.weights = weights\n\n activations = []\n\n for i in range(len(layers)):\n a = np.zeros(layers[i])\n activations.append(a)\n self.activations = activations\n\n derivatives = []\n\n for i in range(len(layers) - 1):\n d = np.zeros(layers[i])\n derivatives.append(d)\n self.derivatives = derivatives",
"def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # 
plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()",
"def n_inputs(self):",
"def __init__(self, in_features, out_features, activation, batch_norm=False, combine_with_1x1=False):\n super(InceptionBlock, self).__init__()\n\n self.activation = activation\n\n if not combine_with_1x1:\n if out_features % 4 != 0:\n raise ValueError(\"If not combining with 1x1, out_features must be divisible by 4\")\n conv_out_features = int(out_features/4)\n else:\n conv_out_features = out_features\n\n if batch_norm:\n self.batch_norm = nn.BatchNorm2d(conv_out_features)\n self.batch_norm1 = nn.BatchNorm2d(conv_out_features)\n self.batch_norm3 = nn.BatchNorm2d(conv_out_features)\n self.batch_norm5 = nn.BatchNorm2d(conv_out_features)\n self.batch_norm7 = nn.BatchNorm2d(conv_out_features)\n else:\n self.batch_norm = None\n\n self.conv1x1 = nn.Conv2d(in_features, conv_out_features, kernel_size=1, padding=0)\n self.conv3x3 = nn.Conv2d(in_features, conv_out_features, kernel_size=3, padding=1)\n self.conv5x5 = nn.Conv2d(in_features, conv_out_features, kernel_size=5, padding=2)\n self.conv7x7 = nn.Conv2d(in_features, conv_out_features, kernel_size=7, padding=3)\n\n self.combine_with_1x1 = combine_with_1x1\n if combine_with_1x1:\n self.combiner = nn.Conv2d(out_features*4, out_features, kernel_size=1)\n self.batch_norm_comb = nn.BatchNorm2d(out_features)",
"def __call__(self, inputs: np.ndarray):\n # Denote the impact the inputs have directly on the outputs\n output_inputs: np.ndarray = np.matmul(self.in2out, inputs.transpose()).transpose()\n \n # Denote the impact hidden nodes have on the outputs, if there are hidden nodes\n if self.n_hidden > 0:\n # Nice to know:\n # - np.transpose() will transpose the tensor\n # - np.matmul(tensor1, tensor2) will perform a matrix multiplication between tensor and tensor2\n \n # The activation is defined by:\n # - the inputs mapping to the hidden nodes\n # - the hidden nodes mapping to themselves\n # - the hidden nodes' biases\n \n # 1) Propagate the hidden nodes\n self.hidden_act = self.act_f(np.matmul(self.in2hid, inputs.transpose()).transpose() +\n np.matmul(self.hid2hid, self.hidden_act.transpose()).transpose() +\n self.hidden_biases)\n \n # 2) Execute the RNN nodes if they exists (updating current hidden state)\n for i, rnn_idx in enumerate(self.rnn_idx):\n self.rnn_state[:, i] = self.rnn_array[i](\n np.concatenate((self.in2hid[rnn_idx] * inputs,\n self.hid2hid[rnn_idx] * self.hidden_act),\n axis=1)[self.rnn_map[i]].reshape(self.bs, self.rnn_array[i].input_size)\n )\n self.hidden_act[:, rnn_idx] = self.rnn_state[:, i, 0]\n \n # 3) Propagate hidden-values to the outputs\n output_inputs += np.matmul(self.hid2out, self.hidden_act.transpose()).transpose()\n \n # Define the values of the outputs, which is the sum of their received inputs and their corresponding bias\n self.output_act = self.act_f(output_inputs + self.output_biases)\n return self.output_act",
"def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n super().__init__(input_nodes, hidden_nodes, hidden_layers, output_nodes)",
"def predict_from(self, inputs, to_layers):",
"def predict(self, input_vector):\r\n inputlayer = self.layers[0]\r\n for i in range(len(inputlayer)):\r\n inputlayer[i].activation = input_vector[i]\r\n\r\n layers = self.layers\r\n output_vector=[]\r\n a=0\r\n #skips input layer\r\n\r\n #i=current layer\r\n for i in range(1,len(layers)):\r\n #j is the node we are on in the layer\r\n for j in range(len(layers[i])):\r\n layers[i][j].compute_activation()\r\n if i==len(layers)-1:\r\n output_vector.append(layers[i][j].activation)\r\n return output_vector",
"def identity_block(input_tensor, kernel_size, filters, stage, block):\n filters0, filters1, filters2 = filters\n conv_name_base = 'res' + str(stage) + block\n bn_name_base = 'bn' + str(stage) + block\n add_name = 'add' + str(stage) + \"_\" + block\n relu_name = 'relu' + str(stage) + \"_\" + block\n\n # Tensors\n input_tensor_chans = input_tensor.dims(\n 3) if input_tensor.shape.layout == sg.NHWC else input_tensor.dims(1)\n conv0_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters0, 1, 1, input_tensor_chans)))\n bn0_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n bn0_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters0)))\n conv1_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters1, kernel_size, kernel_size, filters0)))\n bn1_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n bn1_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters1)))\n conv2_tensor = sg.Tensor(\n data_layout=sg.NHWC, tensor_data=generate_random_data(\n (filters2, 1, 1, filters1)))\n bn2_mean_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_var_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_gamma_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n bn2_beta_tensor = sg.Tensor(\n data_layout=sg.NC, tensor_data=generate_random_data((1, filters2)))\n\n x = sg.nn.convolution(\n input_tensor, conv0_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2a')\n x = sg.nn.batch_norm(\n x, bn0_mean_tensor, bn0_var_tensor, bn0_gamma_tensor, bn0_beta_tensor,\n activation=\"relu\", name=bn_name_base + '_2a')\n x = sg.nn.convolution(\n x, conv1_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2b')\n x = sg.nn.batch_norm(\n x, bn1_mean_tensor, bn1_var_tensor, bn1_gamma_tensor, bn1_beta_tensor,\n activation=\"relu\", name=bn_name_base + '_2b')\n x = sg.nn.convolution(\n x, conv2_tensor, stride=[1, 1], padding=\"same\",\n name=conv_name_base + '_2c')\n x = sg.nn.batch_norm(\n x, bn2_mean_tensor, bn2_var_tensor, bn2_gamma_tensor, bn2_beta_tensor,\n name=bn_name_base + '_2c')\n x = sg.math.add(x, input_tensor, name=add_name)\n x = sg.nn.relu(x, name=relu_name)\n return x",
"def _activation(self,components,activation):\r\n \r\n if activation == \"ReLU\":\r\n components.append(nn.ReLU())\r\n elif activation == \"Sigmoid\":\r\n components.append(nn.Sigmoid())\r\n else:\r\n raise Exception(\"Invalid activation fn: \"+activation)",
"def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)",
"def output_layer_activation(x):\n return x"
]
| [
"0.6643204",
"0.6569007",
"0.6522227",
"0.64859754",
"0.6354559",
"0.6301704",
"0.62646145",
"0.62479687",
"0.6245054",
"0.6192256",
"0.61219674",
"0.60853064",
"0.60848325",
"0.6047769",
"0.6016878",
"0.6013377",
"0.60133046",
"0.6000324",
"0.5991866",
"0.59814864",
"0.5956167",
"0.59558624",
"0.5936928",
"0.5928056",
"0.5918551",
"0.59167904",
"0.59072816",
"0.5902511",
"0.5898917",
"0.58892256"
]
| 0.6972768 | 0 |
Top-level function to create a Share or Batch instance depending on the number of symbols given | def IexFinance(symbol, **kwargs):
if type(symbol) is str:
if not symbol:
raise ValueError("Please input a symbol or list of symbols")
else:
inst = Share(symbol, **kwargs)
elif type(symbol) is list:
if not symbol:
raise ValueError("Please input a symbol or list of symbols")
if len(symbol) == 1:
inst = Share(symbol, **kwargs)
        elif len(symbol) > 100:
raise ValueError("Invalid symbol list. Maximum 100 symbols.")
else:
inst = Batch(symbol, **kwargs)
return inst
else:
raise TypeError("Please input a symbol or list of symbols")
return inst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, symbols, short_window, long_window, command_execute):\n\n\t\t# Create a new Macs object for every stock\n\t\tself.data = {s: Macs(short_window, long_window) for s in symbols}\n\t\tself.command_execute = command_execute",
"def test_create_nas_share_by_nas(self):\n pass",
"def create(*args):",
"def make_random_mnemonic_shares(minimum_shares, nof_shares, prime=PRIME_12TH_MERSENNE):\n if minimum_shares > 255 or nof_shares > 255:\n raise ValueError(\"Can only create up to 255 shares\")\n secret, shares = make_random_shares(\n minimum=minimum_shares, shares=int(nof_shares), prime=prime\n )\n secret_bits = bits_in_number(secret)\n mnemonic_secret = number_to_mnemonic(secret)\n mnemonic_shares = [number_to_mnemonic(pad_number(s, minimum_shares, i)) for i, s in shares]\n return mnemonic_secret, secret_bits, mnemonic_shares,",
"def create_instance(c_instance):\n\treturn 0",
"def create(ctx, amount, save_to, type):\n skale = ctx.obj['skale']\n for i in range(amount):\n schain_info = create_schain(skale, skale.wallet, type)\n save_info(i, schain_info, skale.wallet, save_to)\n logger.info(LONG_LINE)\n show_all_schains_names(skale)",
"def createSharedNotebook(self, authenticationToken, sharedNotebook):\r\n pass",
"def create_share(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateShare', self.handle))",
"def create_by_foundation(ctx, amount, save_to, type):\n skale = ctx.obj['skale']\n skale_ima = SkaleIma(ENDPOINT, IMA_ABI_FILEPATH, skale.wallet)\n\n for i in range(amount):\n schain_info = create_schain(skale, skale.wallet, type,\n by_foundation=True, skale_ima=skale_ima)\n save_info(i, schain_info, skale.wallet, save_to)\n logger.info(LONG_LINE)\n show_all_schains_names(skale)",
"def create():",
"def create():",
"def _createMaster(self, *args, **kwds):\n raise NotImplementedError",
"def create_share(self, pool, project, share):\n self.verify_avail_space(pool, project, share, share['quota'])\n svc = self.share_path % (pool, project, share['name'])\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n svc = self.shares_path % (pool, project)\n ret = self.rclient.post(svc, share)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error creating '\n 'share: %(name)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'name': share['name'],\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.ShareBackendException(msg=exception_msg)\n else:\n exception_msg = (_('Share with name %s already exists.')\n % share['name'])\n raise exception.ShareBackendException(msg=exception_msg)",
"def create_share(self, ctx, share, share_server=None):\n # probe into getting a NAS protocol helper for the share in order\n # to facilitate early detection of unsupported protocol type\n self._get_helper(share)\n sizestr = six.text_type(share['size']) + 'GB'\n share_dir = '/' + share['name']\n local_share_path = self._get_local_share_path(share)\n cmd = ['mkdir', local_share_path]\n # set hard limit quota on the sub-directory/share\n args = ('volume', 'quota', self.gluster_manager.volume,\n 'limit-usage', share_dir, sizestr)\n try:\n self._execute(*cmd, run_as_root=True)\n self.gluster_manager.gluster_call(*args)\n except exception.ProcessExecutionError as exc:\n self._cleanup_create_share(local_share_path, share['name'])\n LOG.error(_LE('Unable to create share %s'), share['name'])\n raise exception.GlusterfsException(exc)\n\n export_location = os.path.join(self.gluster_manager.qualified,\n share['name'])\n return export_location",
"def make(cls, *args, **kwargs):\n if not args and 'n_repeats' not in kwargs:\n return kwargs.pop('base_block', MultiLayer)(**kwargs)\n return cls(*args, **kwargs)",
"def create_multiple(cls, count=2, attrs=None):\n objects = []\n for i in range(0, count):\n objects.append(\n cls.create_one(attrs))\n\n return objects",
"def __init__(self, model_names):\n for name in model_names:\n model = spacy.load(name)\n self.pool[name] = SharedModel(name, model)\n log.debug(\"Initialized shared models in pool\")",
"def create(self):\n\t\tself.creating += 1\n\t\ttry:\n\t\t\tself.adopt(self.factory())\n\t\tfinally:\n\t\t\tself.creating -= 1",
"def newPool(name: str, superPool, types: [], cls):\n try:\n if name == \"colorholder\":\n superPool = P0(len(types), cls)\n return superPool\n elif name == \"abstractnode\":\n superPool = P1(len(types), cls)\n return superPool\n elif name == \"node\":\n superPool = P2(len(types), superPool, cls)\n return superPool\n \n elif name == \"subnode\":\n superPool = P3(len(types), superPool, cls)\n return superPool\n \n else:\n if superPool is None:\n superPool = BasePool(len(types), name, StoragePool.noKnownFields, StoragePool.noAutoFields, cls)\n else:\n superPool = superPool.makeSubPool(len(types), name, cls)\n return superPool\n finally:\n types.append(superPool)",
"def make_instances():\n body = request.json\n return create_instances(\n flavor=body.get(\"flavor\"),\n name=body.get(\"name\"),\n network_name=body.get(\"network_name\"),\n )",
"def bdev_split_create(client, base_bdev, split_count, split_size_mb=None):\n params = {\n 'base_bdev': base_bdev,\n 'split_count': split_count,\n }\n if split_size_mb:\n params['split_size_mb'] = split_size_mb\n\n return client.call('bdev_split_create', params)",
"def createSymbol(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool, makeUnique: bool, sourceType: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.symbol.Symbol:\n ...",
"def __init__(__self__, *,\n accelerator_count: Optional[pulumi.Input[str]] = None,\n accelerator_type: Optional[pulumi.Input[str]] = None,\n gpu_partition_size: Optional[pulumi.Input[str]] = None,\n gpu_sharing_config: Optional[pulumi.Input['GPUSharingConfigArgs']] = None,\n max_time_shared_clients_per_gpu: Optional[pulumi.Input[str]] = None):\n if accelerator_count is not None:\n pulumi.set(__self__, \"accelerator_count\", accelerator_count)\n if accelerator_type is not None:\n pulumi.set(__self__, \"accelerator_type\", accelerator_type)\n if gpu_partition_size is not None:\n pulumi.set(__self__, \"gpu_partition_size\", gpu_partition_size)\n if gpu_sharing_config is not None:\n pulumi.set(__self__, \"gpu_sharing_config\", gpu_sharing_config)\n if max_time_shared_clients_per_gpu is not None:\n pulumi.set(__self__, \"max_time_shared_clients_per_gpu\", max_time_shared_clients_per_gpu)",
"def do_create(self, args):\n args = args.split()\n l = len(args)\n if l < 1:\n print(\"** class name missing **\")\n else:\n if args[0] in HBNBCommand.valid_classes.keys():\n if l == 1:\n new_obj = HBNBCommand.valid_classes[args[0]]()\n else:\n result = self.__create_help(args[1:])\n if result is None:\n print(\"** Object fails **\")\n return\n new_obj = HBNBCommand.valid_classes[args[0]](**result)\n print(new_obj.id)\n new_obj.save()\n else:\n print(\"** class doesn't exist **\")",
"def create_split(self) -> NoReturn:\n raise NotImplementedError",
"def make_inheritable(token):\r\n return win32api.DuplicateHandle(win32api.GetCurrentProcess(), token,\r\n win32api.GetCurrentProcess(), 0, 1,\r\n win32con.DUPLICATE_SAME_ACCESS)",
"def create():\n pass",
"def createSharedNotebook(self, authenticationToken, sharedNotebook):\r\n self.send_createSharedNotebook(authenticationToken, sharedNotebook)\r\n return self.recv_createSharedNotebook()",
"def ex1_create(alpha,beta,pace,delta):\n\t\n\tfilename = seed+\"/ex_sim_a\"+str(alpha)+\"_p\"+str(pace)+\"_d\"+str(delta)+\".tmp\"\n\t\n\t# generate the K random walks\n\tfor _ in range(K):\n\t\tavancement(_,K)\n\t\tcall(filename,alpha,beta,'all',pace,delta)",
"def create_batch(count):\n\n if count < 1:\n raise click.BadParameter('count needs to be > 0')\n\n factory = V2ProfileFactory()\n output = factory.create_batch(count, export_json=True)\n click.echo(output)"
]
| [
"0.5649457",
"0.5466746",
"0.54380417",
"0.5288137",
"0.52806205",
"0.5269566",
"0.52595186",
"0.51963764",
"0.5187528",
"0.5149091",
"0.5149091",
"0.5115753",
"0.51052445",
"0.5080091",
"0.50597936",
"0.49435118",
"0.49334913",
"0.49333498",
"0.49316835",
"0.49204686",
"0.49200734",
"0.4901451",
"0.48983923",
"0.48966104",
"0.48901957",
"0.48595884",
"0.48553044",
"0.48505154",
"0.48504826",
"0.4847486"
]
| 0.56744677 | 0 |
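A brief usage sketch of the IexFinance factory above. The ticker strings are illustrative placeholders, the import path is not shown in the record (the factory and its Share/Batch classes are assumed to be in scope), and only behavior stated in the function itself is relied on.
# Usage sketch (hypothetical ticker symbols; behavior as defined in the factory above):
apple = IexFinance("AAPL")                     # plain string -> Share instance
one = IexFinance(["AAPL"])                     # one-symbol list -> Share instance
several = IexFinance(["AAPL", "MSFT", "FB"])   # 2-100 symbols -> Batch instance
# An empty string or list raises ValueError, a list of more than 100 symbols
# raises ValueError, and any other input type raises TypeError.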
Universal selector method to obtain custom datapoints from an individual endpoint. If an invalid endpoint is specified, throws an IEXEndpointError. If an invalid datapoint is specified, throws an IEXDatapointError. If there are issues with the query, throws an IEXQueryError. | def get_select_datapoints(self, endpoint, attrList= []):
if type(attrList) is str:
attrList = [attrList]
result = {}
if not attrList:
raise ValueError("Please give a valid attribute list")
try:
ep = self.data_set[endpoint]
        except KeyError:
raise IEXEndpointError(endpoint)
for attr in attrList:
try:
query = ep[attr]
            except KeyError:
raise IEXDatapointError(endpoint, attr)
result.update({attr: query})
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_select_datapoints(self, endpoint, attrList= []):\r\n if type(attrList) is str:\r\n attrList = [attrList]\r\n result = {}\r\n if not attrList:\r\n raise ValueError(\"Please give a valid attribute list\")\r\n for symbol in self.symbolList:\r\n try: \r\n ep = self.data_set[symbol][endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n temp = {}\r\n for attr in attrList:\r\n try:\r\n query = ep[attr]\r\n except:\r\n raise IEXDatapointError(endpoint, attr)\r\n temp.update({attr: query})\r\n result.update({symbol:temp})\r\n return result",
"def get_endpoints(self, **kwargs):\n return self._database.lookup('endpoint', kwargs)",
"def get_endpoint(self, *args):\n\t\traise NotImplementedError",
"def get_select_endpoints(self, endpointList=[]):\r\n if type(endpointList) is str:\r\n endpointList = [endpointList]\r\n result = {}\r\n if not endpointList:\r\n raise ValueError(\"Please provide a valid list of endpoints\")\r\n for endpoint in endpointList:\r\n try:\r\n query = self.data_set[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n result.update({endpoint: query})\r\n return result",
"def get_select_endpoints(self, endpoints=[]): \r\n if type(endpoints) is str:\r\n endpoints = [endpoints]\r\n elif not endpoints:\r\n raise ValueError(\"Please provide a valid list of endpoints\")\r\n result = {}\r\n for symbol in self.symbolList:\r\n temp = {}\r\n try:\r\n ds = self.data_set[symbol]\r\n except:\r\n IEXSymbolError(symbol)\r\n for endpoint in endpoints:\r\n try:\r\n query = ds[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n temp.update({endpoint: query})\r\n result.update({symbol:temp})\r\n return result",
"def get_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover",
"def _datapoint(self,\n datapoint_param: DataPointParam) \\\n -> Optional[List[DataPointResult]]:\n logger.debug('getting datapoint {}'.format(datapoint_param))\n # block until token refreshed. Make sure it is a valid token\n with self.data.tokenManager.valid_token_ctx() as token:\n res = jGetter.get_data_points(self.auth, token,\n datapoint_param)\n return res",
"def get_endpoint(self, endpoint):\n for item in self.endpoints:\n if endpoint == item[0]:\n return item\n return None",
"def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')",
"def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')",
"def get_one(self, endpoint_ident):\n context = pecan.request.context\n endpoint = api_utils.get_resource('Endpoint', endpoint_ident)\n return Endpoint.convert_with_links(endpoint)",
"def getPointValues(self, *args, **kwargs):\n ...",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n if not endpoint_override:\n return None\n endpoint_data = discover.EndpointData(catalog_url=endpoint_override)\n\n if endpoint_data.api_version and not discover_versions:\n return endpoint_data\n\n return endpoint_data.get_versioned_data(\n session, cache=self._discovery_cache,\n discover_versions=discover_versions)",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n return super(FixedEndpointPlugin, self).get_endpoint_data(\n session,\n endpoint_override=endpoint_override or self.endpoint,\n discover_versions=discover_versions,\n **kwargs)",
"def url_for(self, attr=None, filter_value=None,\r\n service_type='network', endpoint_type='publicURL'):\r\n\r\n catalog = self.catalog['access'].get('serviceCatalog', [])\r\n matching_endpoints = []\r\n for service in catalog:\r\n if service['type'] != service_type:\r\n continue\r\n\r\n endpoints = service['endpoints']\r\n for endpoint in endpoints:\r\n if not filter_value or endpoint.get(attr) == filter_value:\r\n matching_endpoints.append(endpoint)\r\n\r\n if not matching_endpoints:\r\n raise exceptions.EndpointNotFound()\r\n elif len(matching_endpoints) > 1:\r\n raise exceptions.AmbiguousEndpoints(\r\n matching_endpoints=matching_endpoints)\r\n else:\r\n if endpoint_type not in matching_endpoints[0]:\r\n raise exceptions.EndpointTypeNotFound(type_=endpoint_type)\r\n\r\n return matching_endpoints[0][endpoint_type]",
"def validate_raw_endpoint(raw_endpoint: Dict[str, Any]) -> None:\n validate_raw_endpoint_route(raw_endpoint)\n validate_raw_endpoint_method(raw_endpoint)",
"def EvaluatePointDataField(self, *float, **kwargs):\n ...",
"def connect_data_api(self, endpoint):\n\n url = 'https://api.gdax.com' + endpoint\n res = requests.get(url)\n\n if res.status_code == 200:\n return res.json()\n else:\n raise ValueError(res.content)",
"def get_data(endpoint_name, arg=None,\n project_name=None, fields=None, size=get_setting_value('DEFAULT_SIZE'), page=0,\n data_category=None, query_args={}, verify=False, *args, **kwargs):\n endpoint = get_setting_value('GDC_API_ENDPOINT').format(endpoint=endpoint_name)\n if arg:\n endpoint = endpoint+'/{}'.format(arg)\n else:\n ## prep extra-params, including `from` param, as dict\n extra_params = {}\n if page>0:\n from_param = helpers.compute_start_given_page(page=page, size=size)\n extra_params.update({\n 'from': from_param,\n })\n if fields:\n extra_params.update({'fields': ','.join(helpers.convert_to_list(fields))})\n if dict(**kwargs):\n ## TODO check on whether this handles redundant param spec \n ## correctly\n extra_params.update(dict(**kwargs))\n params = _params.construct_parameters(project_name=project_name,\n size=size,\n data_category=data_category,\n query_args=query_args,\n verify=verify,\n **extra_params\n )\n # requests URL-encodes automatically\n log.info('submitting request for {endpoint} with params {params}'.format(endpoint=endpoint, params=params))\n response = requests_get(endpoint, params=params)\n log.info('url requested was: {}'.format(response.url))\n response.raise_for_status()\n return response",
"def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]",
"def run(self, endpoint_uri, **kwargs):\n return self.get(endpoint_uri, **kwargs)",
"def get_data_from_endpoint(self, from_, to_, endpoint):\n endpoint = self.make_endpoint(endpoint)\n from_, to_ = str(from_), str(to_)\n payload = {\n 'auth': self.auth_token,\n 'id': self.monitor_id,\n 'start': from_,\n 'end': to_,\n 'extendLimit': 'true',\n 'fullContents': 'true'\n }\n\n r = self.session.get(endpoint, params=payload)\n ratelimit_remaining = r.headers['X-RateLimit-Remaining']\n #print ('Remaining Ratelimit = ' + str(ratelimit_remaining))\n\n # If the header is empty or 0 then wait for a ratelimit refresh.\n if (not ratelimit_remaining) or (float(ratelimit_remaining) < 1):\n #print('Waiting for ratelimit refresh...')\n sleep(self.ratelimit_refresh)\n\n return r",
"def get( # type: ignore [override]\n self,\n id: Optional[int] = None,\n external_id: Optional[str] = None,\n ) -> Union[None, Datapoints, List[Datapoints]]:\n # TODO: Question, can we type annotate without specifying the function?\n return super().get(id, external_id) # type: ignore [return-value]",
"async def get_query(self,\n endpoint: str,\n params: Optional[Dict[str, str]]=None,\n extra_headers: Optional[Dict[str, str]]=None) -> Dict[str, str]:\n target = self._generate_url(endpoint,params=params)\n api_sig = self._gen_api_sig(target)\n headers = {\"apisign\": api_sig}\n if extra_headers is not None:\n headers.update(extra_headers)\n return await self._get(target, headers=headers)",
"def describe_endpoint(EndpointName=None):\n pass",
"def readDatapoint(self, wait_for_clean_data=False):\n raise NotImplementedError(\"Implement in Headset child class\")",
"def test_access_all_data_all_endpoints(self):\n\n # Some end points just can't be fetched so we have to ignore them.\n end_point_exceptions = [\n \"/api/help/\",\n \"/api/test_host/\",\n \"/api/system_status/\",\n \"/api/updates_available/\",\n \"/api/session/\",\n \"/api/action/\",\n \"/api/run_stratagem/\",\n \"/api/stratagem_configuration/\",\n ]\n\n end_points = self.get_json_by_uri(\"/api/\", args={\"limit\": 0})\n\n for end_point in end_points.values():\n if end_point[\"list_endpoint\"] not in end_point_exceptions:\n import sys\n\n sys.stderr.write(\"\\nReading endpoint %s\\n\" % end_point[\"list_endpoint\"])\n self.get_json_by_uri(end_point[\"list_endpoint\"], args={\"limit\": 0})\n sys.stderr.write(\"\\nRead endpoint %s\\n\" % end_point[\"list_endpoint\"])",
"def read_endpoint(endpoint):\n if not re.search('{(.+?)}', endpoint):\n return endpoint\n read = re.findall('{(.+?)}', endpoint)\n result = endpoint\n for item in read:\n result = re.sub('{(%s)}' % item, str(STORED_ID[item]), result)\n return result",
"def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)",
"def get_endpoint(cls):\n raise NotImplementedError(\n \"%s must have implemented get_endpoint.\" % cls.__name__,\n )"
]
| [
"0.6004158",
"0.5757326",
"0.56205434",
"0.559747",
"0.5489849",
"0.54704744",
"0.51634777",
"0.5106707",
"0.5091631",
"0.5091631",
"0.50713134",
"0.50563973",
"0.5046296",
"0.5007252",
"0.4949157",
"0.48976734",
"0.488121",
"0.48496354",
"0.48321018",
"0.47773814",
"0.47669613",
"0.47596082",
"0.47289252",
"0.47275785",
"0.47238904",
"0.4701501",
"0.4683974",
"0.46816677",
"0.46789894",
"0.4676493"
]
| 0.6258382 | 0 |
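A hedged call sketch for the single-symbol selector above. The "quote" endpoint and its field names are assumptions for illustration only, and the object is assumed to be the one returned by IexFinance for a single symbol; the wrapping of a bare string, the return shape, and the raised errors follow from the method body itself.
# Hypothetical call pattern; endpoint and datapoint names are assumptions.
share = IexFinance("AAPL")
subset = share.get_select_datapoints("quote", ["latestPrice", "open"])
# -> {"latestPrice": ..., "open": ...}
one_field = share.get_select_datapoints("quote", "open")  # a bare string is wrapped in a list
# An empty attribute list raises ValueError, an unknown endpoint raises
# IEXEndpointError, and an unknown datapoint raises IEXDatapointError.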
Universal selector method to obtain custom endpoints from the data set. Will throw an IEXEndpointError if an invalid endpoint is specified and an IEXQueryError if the endpoint cannot be retrieved. | def get_select_endpoints(self, endpoints=[]):
if type(endpoints) is str:
endpoints = [endpoints]
elif not endpoints:
raise ValueError("Please provide a valid list of endpoints")
result = {}
for symbol in self.symbolList:
temp = {}
try:
ds = self.data_set[symbol]
            except KeyError:
                raise IEXSymbolError(symbol)
for endpoint in endpoints:
try:
query = ds[endpoint]
                except KeyError:
raise IEXEndpointError(endpoint)
temp.update({endpoint: query})
result.update({symbol:temp})
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_endpoints(self, **kwargs):\n return self._database.lookup('endpoint', kwargs)",
"def get_select_endpoints(self, endpointList=[]):\r\n if type(endpointList) is str:\r\n endpointList = [endpointList]\r\n result = {}\r\n if not endpointList:\r\n raise ValueError(\"Please provide a valid list of endpoints\")\r\n for endpoint in endpointList:\r\n try:\r\n query = self.data_set[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n result.update({endpoint: query})\r\n return result",
"def get_endpoint(self, *args):\n\t\traise NotImplementedError",
"def get_select_datapoints(self, endpoint, attrList= []):\r\n if type(attrList) is str:\r\n attrList = [attrList]\r\n result = {}\r\n if not attrList:\r\n raise ValueError(\"Please give a valid attribute list\")\r\n try:\r\n ep = self.data_set[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n for attr in attrList:\r\n try:\r\n query = ep[attr]\r\n except:\r\n raise IEXDatapointError(endpoint, attr)\r\n result.update({attr: query})\r\n return result",
"def url_for(self, attr=None, filter_value=None,\r\n service_type='network', endpoint_type='publicURL'):\r\n\r\n catalog = self.catalog['access'].get('serviceCatalog', [])\r\n matching_endpoints = []\r\n for service in catalog:\r\n if service['type'] != service_type:\r\n continue\r\n\r\n endpoints = service['endpoints']\r\n for endpoint in endpoints:\r\n if not filter_value or endpoint.get(attr) == filter_value:\r\n matching_endpoints.append(endpoint)\r\n\r\n if not matching_endpoints:\r\n raise exceptions.EndpointNotFound()\r\n elif len(matching_endpoints) > 1:\r\n raise exceptions.AmbiguousEndpoints(\r\n matching_endpoints=matching_endpoints)\r\n else:\r\n if endpoint_type not in matching_endpoints[0]:\r\n raise exceptions.EndpointTypeNotFound(type_=endpoint_type)\r\n\r\n return matching_endpoints[0][endpoint_type]",
"def get_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover",
"def endpoint_list(self):\n _, body = self.request('/v1.1/endpoints', 'GET')\n return body",
"def get_endpoints(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id) + \"/endpoints\"\n resp = self._cb.get_object(url)\n return resp.get(\"results\", [])",
"def get_select_datapoints(self, endpoint, attrList= []):\r\n if type(attrList) is str:\r\n attrList = [attrList]\r\n result = {}\r\n if not attrList:\r\n raise ValueError(\"Please give a valid attribute list\")\r\n for symbol in self.symbolList:\r\n try: \r\n ep = self.data_set[symbol][endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n temp = {}\r\n for attr in attrList:\r\n try:\r\n query = ep[attr]\r\n except:\r\n raise IEXDatapointError(endpoint, attr)\r\n temp.update({attr: query})\r\n result.update({symbol:temp})\r\n return result",
"def list_endpoints(self):\n resp, body = self.get(\"endpoints\")\n body = self._parse_array(etree.fromstring(body))\n return resp, body",
"def get_endpoint(self, endpoint):\n for item in self.endpoints:\n if endpoint == item[0]:\n return item\n return None",
"def _get_endpoint_list(name, filter_by, limit, offset, quiet, all_items):\n\n get_endpoint_list(name, filter_by, limit, offset, quiet, all_items)",
"async def connections_endpoints(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n connection_id = request.match_info[\"conn_id\"]\n\n profile = context.profile\n connection_mgr = ConnectionManager(profile)\n try:\n endpoints = await connection_mgr.get_endpoints(connection_id)\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n except (BaseModelError, StorageError, WalletError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n return web.json_response(dict(zip((\"my_endpoint\", \"their_endpoint\"), endpoints)))",
"def get_endpoint(cls):\n raise NotImplementedError(\n \"%s must have implemented get_endpoint.\" % cls.__name__,\n )",
"def endpoints(self) -> pulumi.Input[Sequence[pulumi.Input['EndpointDependencyArgs']]]:\n return pulumi.get(self, \"endpoints\")",
"def match_endpoint(self, method, request):\n if \"?\" in request:\n raise InvalidRequest()\n all_endpoints = self.config.endpoints()\n match_str = \"%s %s\" % (method, request)\n matched_endpoints = set()\n # Note: fnmatch.filter seemed to be broken when trying to do exaclty this.\n for endpoint in all_endpoints:\n if fnmatch.fnmatch(match_str, endpoint):\n matched_endpoints.add(endpoint)\n return matched_endpoints",
"def get_one(self, endpoint_ident):\n context = pecan.request.context\n endpoint = api_utils.get_resource('Endpoint', endpoint_ident)\n return Endpoint.convert_with_links(endpoint)",
"def endpoints(self) -> Optional[Sequence['outputs.EndpointResponse']]:\n return pulumi.get(self, \"endpoints\")",
"def get_endpoints(self):\r\n return ENDPOINTS",
"def get_endpoints(user):\n return [{'name': endpoint.name,\n 'url': get_endpoint_url(user, endpoint),\n 'help_url': get_endpoint_help_url(user, endpoint)}\n for endpoint in _CODE_ENDPOINT_MAP.values()\n if endpoint.permission_func(user, endpoint)]",
"def read_endpoint(endpoint):\n if not re.search('{(.+?)}', endpoint):\n return endpoint\n read = re.findall('{(.+?)}', endpoint)\n result = endpoint\n for item in read:\n result = re.sub('{(%s)}' % item, str(STORED_ID[item]), result)\n return result",
"def get_endpoints(self):\n return self.endpoints.values()",
"def endpoint_ips(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/ips', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/ips' % endpoint_name, 'GET')\n return body",
"def get_endpoints(self, epg_dn):\n result = []\n for item in filter(lambda x: type(x).__name__ == 'CEp', self.query_child_objects(epg_dn)):\n # Creates a dynamic object type.\n endpoint = type('endpoint', (object,), {})\n\n # Filter the endpoint in memory looking for the object that contains the interface where the endpoint is\n # attached\n endpoint_connection_mo = filter(lambda x: type(x).__name__ == 'RsCEpToPathEp',\n self.query_child_objects(item.dn))[0]\n\n # Format the string to be human readable\n endpoint_connection_interface = str(endpoint_connection_mo.tDn).replace('topology/pod-1/paths','node').\\\n replace('pathep-[', '').replace(']','')\n\n # Add attributes to the object\n endpoint.ip = item.ip\n endpoint.mac = item.mac\n endpoint.name = item.name\n endpoint.interface = endpoint_connection_interface\n\n # Append it to the list\n result.append(endpoint)\n return result",
"def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventEndpointArgs']]]]:\n return pulumi.get(self, \"endpoints\")",
"def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LiveEventEndpointArgs']]]]:\n return pulumi.get(self, \"endpoints\")",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n if not endpoint_override:\n return None\n endpoint_data = discover.EndpointData(catalog_url=endpoint_override)\n\n if endpoint_data.api_version and not discover_versions:\n return endpoint_data\n\n return endpoint_data.get_versioned_data(\n session, cache=self._discovery_cache,\n discover_versions=discover_versions)",
"def get_endpoint(group, **adapter_kwargs):\n result = get_adapter(group, **adapter_kwargs).get_endpoint()\n if not result:\n service_type = adapter_kwargs.get(\n 'service_type',\n getattr(getattr(CONF, group), 'service_type', group))\n endpoint_type = adapter_kwargs.get('endpoint_type', 'internal')\n raise exception.CatalogNotFound(\n service_type=service_type, endpoint_type=endpoint_type)\n return result",
"def get_endpoint(self, session, **kwargs):\n return kwargs.get('endpoint_override') or self.endpoint",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n return super(FixedEndpointPlugin, self).get_endpoint_data(\n session,\n endpoint_override=endpoint_override or self.endpoint,\n discover_versions=discover_versions,\n **kwargs)"
]
| [
"0.7035371",
"0.6563425",
"0.5973638",
"0.5787101",
"0.56657445",
"0.56499183",
"0.56115985",
"0.55519307",
"0.5545966",
"0.5541535",
"0.546775",
"0.542534",
"0.5412308",
"0.5348684",
"0.5323091",
"0.53188264",
"0.5276792",
"0.5265353",
"0.521188",
"0.52017355",
"0.51605594",
"0.5144382",
"0.512637",
"0.51246655",
"0.5107491",
"0.5107491",
"0.5065098",
"0.50640476",
"0.50534725",
"0.50406915"
]
| 0.6566298 | 1 |
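A hedged sketch of the batch selector above. The endpoint names are assumptions, while the nested result shape (symbol -> endpoint -> data) and the error behavior follow directly from the loop in the method; the batch object is assumed to come from IexFinance with a multi-symbol list.
# Hypothetical call pattern; endpoint names are assumptions.
batch = IexFinance(["AAPL", "MSFT"])
data = batch.get_select_endpoints(["quote", "book"])
# -> {"AAPL": {"quote": {...}, "book": {...}}, "MSFT": {"quote": {...}, "book": {...}}}
single = batch.get_select_endpoints("quote")  # a bare string is accepted as a single endpoint
# An empty endpoint list raises ValueError; an unrecognized endpoint raises IEXEndpointError.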
Universal selector method to obtain custom datapoints from an individual endpoint. If an invalid endpoint is specified, throws an IEXEndpointError. If an invalid datapoint is specified, throws an IEXDatapointError. If there are issues with the query, throws an IEXQueryError. | def get_select_datapoints(self, endpoint, attrList= []):
if type(attrList) is str:
attrList = [attrList]
result = {}
if not attrList:
raise ValueError("Please give a valid attribute list")
for symbol in self.symbolList:
try:
ep = self.data_set[symbol][endpoint]
            except KeyError:
raise IEXEndpointError(endpoint)
temp = {}
for attr in attrList:
try:
query = ep[attr]
                except KeyError:
raise IEXDatapointError(endpoint, attr)
temp.update({attr: query})
result.update({symbol:temp})
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_select_datapoints(self, endpoint, attrList= []):\r\n if type(attrList) is str:\r\n attrList = [attrList]\r\n result = {}\r\n if not attrList:\r\n raise ValueError(\"Please give a valid attribute list\")\r\n try:\r\n ep = self.data_set[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n for attr in attrList:\r\n try:\r\n query = ep[attr]\r\n except:\r\n raise IEXDatapointError(endpoint, attr)\r\n result.update({attr: query})\r\n return result",
"def get_endpoints(self, **kwargs):\n return self._database.lookup('endpoint', kwargs)",
"def get_endpoint(self, *args):\n\t\traise NotImplementedError",
"def get_select_endpoints(self, endpointList=[]):\r\n if type(endpointList) is str:\r\n endpointList = [endpointList]\r\n result = {}\r\n if not endpointList:\r\n raise ValueError(\"Please provide a valid list of endpoints\")\r\n for endpoint in endpointList:\r\n try:\r\n query = self.data_set[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n result.update({endpoint: query})\r\n return result",
"def get_select_endpoints(self, endpoints=[]): \r\n if type(endpoints) is str:\r\n endpoints = [endpoints]\r\n elif not endpoints:\r\n raise ValueError(\"Please provide a valid list of endpoints\")\r\n result = {}\r\n for symbol in self.symbolList:\r\n temp = {}\r\n try:\r\n ds = self.data_set[symbol]\r\n except:\r\n IEXSymbolError(symbol)\r\n for endpoint in endpoints:\r\n try:\r\n query = ds[endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n temp.update({endpoint: query})\r\n result.update({symbol:temp})\r\n return result",
"def get_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover",
"def _datapoint(self,\n datapoint_param: DataPointParam) \\\n -> Optional[List[DataPointResult]]:\n logger.debug('getting datapoint {}'.format(datapoint_param))\n # block until token refreshed. Make sure it is a valid token\n with self.data.tokenManager.valid_token_ctx() as token:\n res = jGetter.get_data_points(self.auth, token,\n datapoint_param)\n return res",
"def get_endpoint(self, endpoint):\n for item in self.endpoints:\n if endpoint == item[0]:\n return item\n return None",
"def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')",
"def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')",
"def get_one(self, endpoint_ident):\n context = pecan.request.context\n endpoint = api_utils.get_resource('Endpoint', endpoint_ident)\n return Endpoint.convert_with_links(endpoint)",
"def getPointValues(self, *args, **kwargs):\n ...",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n if not endpoint_override:\n return None\n endpoint_data = discover.EndpointData(catalog_url=endpoint_override)\n\n if endpoint_data.api_version and not discover_versions:\n return endpoint_data\n\n return endpoint_data.get_versioned_data(\n session, cache=self._discovery_cache,\n discover_versions=discover_versions)",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n return super(FixedEndpointPlugin, self).get_endpoint_data(\n session,\n endpoint_override=endpoint_override or self.endpoint,\n discover_versions=discover_versions,\n **kwargs)",
"def url_for(self, attr=None, filter_value=None,\r\n service_type='network', endpoint_type='publicURL'):\r\n\r\n catalog = self.catalog['access'].get('serviceCatalog', [])\r\n matching_endpoints = []\r\n for service in catalog:\r\n if service['type'] != service_type:\r\n continue\r\n\r\n endpoints = service['endpoints']\r\n for endpoint in endpoints:\r\n if not filter_value or endpoint.get(attr) == filter_value:\r\n matching_endpoints.append(endpoint)\r\n\r\n if not matching_endpoints:\r\n raise exceptions.EndpointNotFound()\r\n elif len(matching_endpoints) > 1:\r\n raise exceptions.AmbiguousEndpoints(\r\n matching_endpoints=matching_endpoints)\r\n else:\r\n if endpoint_type not in matching_endpoints[0]:\r\n raise exceptions.EndpointTypeNotFound(type_=endpoint_type)\r\n\r\n return matching_endpoints[0][endpoint_type]",
"def validate_raw_endpoint(raw_endpoint: Dict[str, Any]) -> None:\n validate_raw_endpoint_route(raw_endpoint)\n validate_raw_endpoint_method(raw_endpoint)",
"def EvaluatePointDataField(self, *float, **kwargs):\n ...",
"def connect_data_api(self, endpoint):\n\n url = 'https://api.gdax.com' + endpoint\n res = requests.get(url)\n\n if res.status_code == 200:\n return res.json()\n else:\n raise ValueError(res.content)",
"def get_data(endpoint_name, arg=None,\n project_name=None, fields=None, size=get_setting_value('DEFAULT_SIZE'), page=0,\n data_category=None, query_args={}, verify=False, *args, **kwargs):\n endpoint = get_setting_value('GDC_API_ENDPOINT').format(endpoint=endpoint_name)\n if arg:\n endpoint = endpoint+'/{}'.format(arg)\n else:\n ## prep extra-params, including `from` param, as dict\n extra_params = {}\n if page>0:\n from_param = helpers.compute_start_given_page(page=page, size=size)\n extra_params.update({\n 'from': from_param,\n })\n if fields:\n extra_params.update({'fields': ','.join(helpers.convert_to_list(fields))})\n if dict(**kwargs):\n ## TODO check on whether this handles redundant param spec \n ## correctly\n extra_params.update(dict(**kwargs))\n params = _params.construct_parameters(project_name=project_name,\n size=size,\n data_category=data_category,\n query_args=query_args,\n verify=verify,\n **extra_params\n )\n # requests URL-encodes automatically\n log.info('submitting request for {endpoint} with params {params}'.format(endpoint=endpoint, params=params))\n response = requests_get(endpoint, params=params)\n log.info('url requested was: {}'.format(response.url))\n response.raise_for_status()\n return response",
"def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]",
"def run(self, endpoint_uri, **kwargs):\n return self.get(endpoint_uri, **kwargs)",
"def get_data_from_endpoint(self, from_, to_, endpoint):\n endpoint = self.make_endpoint(endpoint)\n from_, to_ = str(from_), str(to_)\n payload = {\n 'auth': self.auth_token,\n 'id': self.monitor_id,\n 'start': from_,\n 'end': to_,\n 'extendLimit': 'true',\n 'fullContents': 'true'\n }\n\n r = self.session.get(endpoint, params=payload)\n ratelimit_remaining = r.headers['X-RateLimit-Remaining']\n #print ('Remaining Ratelimit = ' + str(ratelimit_remaining))\n\n # If the header is empty or 0 then wait for a ratelimit refresh.\n if (not ratelimit_remaining) or (float(ratelimit_remaining) < 1):\n #print('Waiting for ratelimit refresh...')\n sleep(self.ratelimit_refresh)\n\n return r",
"def get( # type: ignore [override]\n self,\n id: Optional[int] = None,\n external_id: Optional[str] = None,\n ) -> Union[None, Datapoints, List[Datapoints]]:\n # TODO: Question, can we type annotate without specifying the function?\n return super().get(id, external_id) # type: ignore [return-value]",
"async def get_query(self,\n endpoint: str,\n params: Optional[Dict[str, str]]=None,\n extra_headers: Optional[Dict[str, str]]=None) -> Dict[str, str]:\n target = self._generate_url(endpoint,params=params)\n api_sig = self._gen_api_sig(target)\n headers = {\"apisign\": api_sig}\n if extra_headers is not None:\n headers.update(extra_headers)\n return await self._get(target, headers=headers)",
"def describe_endpoint(EndpointName=None):\n pass",
"def readDatapoint(self, wait_for_clean_data=False):\n raise NotImplementedError(\"Implement in Headset child class\")",
"def test_access_all_data_all_endpoints(self):\n\n # Some end points just can't be fetched so we have to ignore them.\n end_point_exceptions = [\n \"/api/help/\",\n \"/api/test_host/\",\n \"/api/system_status/\",\n \"/api/updates_available/\",\n \"/api/session/\",\n \"/api/action/\",\n \"/api/run_stratagem/\",\n \"/api/stratagem_configuration/\",\n ]\n\n end_points = self.get_json_by_uri(\"/api/\", args={\"limit\": 0})\n\n for end_point in end_points.values():\n if end_point[\"list_endpoint\"] not in end_point_exceptions:\n import sys\n\n sys.stderr.write(\"\\nReading endpoint %s\\n\" % end_point[\"list_endpoint\"])\n self.get_json_by_uri(end_point[\"list_endpoint\"], args={\"limit\": 0})\n sys.stderr.write(\"\\nRead endpoint %s\\n\" % end_point[\"list_endpoint\"])",
"def read_endpoint(endpoint):\n if not re.search('{(.+?)}', endpoint):\n return endpoint\n read = re.findall('{(.+?)}', endpoint)\n result = endpoint\n for item in read:\n result = re.sub('{(%s)}' % item, str(STORED_ID[item]), result)\n return result",
"def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)",
"def _getHyperslabSlices(dsetshape, select):\n\n if select is None:\n # Default: return entire dataset\n return tuple(slice(0, extent) for extent in dsetshape)\n\n if not select.startswith('['):\n msg = \"Bad Request: selection query missing start bracket\"\n raise HTTPError(400, reason=msg)\n if not select.endswith(']'):\n msg = \"Bad Request: selection query missing end bracket\"\n raise HTTPError(400, reason=msg)\n\n # strip brackets\n select = select[1:-1]\n\n select_array = select.split(',')\n if len(select_array) > len(dsetshape):\n msg = \"Bad Request: number of selected dimensions exceeds the rank of the dataset\"\n raise HTTPError(400, reason=msg)\n\n slices = []\n for dim, dim_slice in enumerate(select_array):\n extent = dsetshape[dim]\n\n # default slice values\n start = 0\n stop = extent\n step = 1\n if dim_slice.find(':') < 0:\n # just a number - return slice(start, start + 1, 1) for this dimension\n try:\n start = int(dim_slice)\n except ValueError:\n msg = \"Bad Request: invalid selection parameter (can't convert to int) for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n stop = start + 1\n elif dim_slice == ':':\n # select everything (default)\n pass\n else:\n fields = dim_slice.split(\":\")\n if len(fields) > 3:\n msg = \"Bad Request: Too many ':' seperators for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n try:\n if fields[0]:\n start = int(fields[0])\n if fields[1]:\n stop = int(fields[1])\n if len(fields) > 2 and fields[2]:\n step = int(fields[2])\n except ValueError:\n msg = \"Bad Request: invalid selection parameter (can't convert to int) for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n\n if start < 0 or start > extent:\n msg = \"Bad Request: Invalid selection start parameter for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n if stop > extent:\n msg = \"Bad Request: Invalid selection stop parameter for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n if step <= 0:\n msg = \"Bad Request: invalid selection step parameter for dimension: \" + str(dim)\n raise HTTPError(400, reason=msg)\n slices.append(slice(start, stop, step))\n\n return tuple(slices)"
]
| [
"0.6259866",
"0.57575727",
"0.5619659",
"0.5598211",
"0.5490989",
"0.5469542",
"0.5162907",
"0.51040107",
"0.50893545",
"0.50893545",
"0.5070232",
"0.50570154",
"0.504547",
"0.50083077",
"0.49490505",
"0.48968083",
"0.4880763",
"0.48488453",
"0.48321742",
"0.47770387",
"0.47664103",
"0.47583073",
"0.4729616",
"0.47259435",
"0.4723121",
"0.47002792",
"0.46837214",
"0.46807247",
"0.46781614",
"0.46767294"
]
| 0.6005972 | 1 |
Parses a comment body for Amazon book URLs and returns all comentions | def get_comentions(body):
isbns = set(isbn for _, isbn in ISBN_REGEX.findall(body))
for isbn1, isbn2 in permutations(isbns, 2):
yield isbn1, isbn2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape_one_book(self, url):\n\n if url in self.url_to_explore:\n self.url_to_explore.remove(url)\n req = requests.get(url, headers = self.headers).content\n soup = BeautifulSoup(req, 'html5lib')\n soupbody = soup.body\n\n book_data = {}\n # get book url\n book_url = url\n book_data[\"url\"] = book_url\n\n # get book title\n book_title = soupbody.find('h1', attrs={'id':'bookTitle'}).text.strip()\n if book_title:\n book_data[\"title\"] = book_title\n\n # # get book id\n reg = 'https://www.goodreads.com/book/show/([0-9]+)'\n book_id = re.search(reg, url).group(1)\n book_data[\"id\"] = book_id\n\n # get book ISBN\n book_databox = soupbody.find('div', attrs={'id':'bookDataBox'})\n if book_databox:\n all_float_divs = book_databox.find_all('div',\n attrs = {'class' : 'clearFloats'})\n book_isbn = ''\n for div in all_float_divs:\n title = div.find('div',\n attrs = {'class':'infoBoxRowTitle'}).text.strip()\n if title == 'ISBN':\n book_isbn = div.find('div',\n attrs = {'class':'infoBoxRowItem'}).contents[0].strip()\n book_data[\"ISBN\"] = book_isbn\n\n # get book author url and author name\n author_name_container = soupbody.find('div',\n attrs = {'class':\"authorName__container\"})\n if author_name_container:\n all_authors = author_name_container.find_all('a',\n href = True, attrs = {'class':\"authorName\"})\n cur_author_url = []\n cur_author_name = []\n for author in all_authors:\n cur_author_url.append(author['href'])\n name = author.find('span', attrs = {'itemprop':'name'}).text.strip()\n cur_author_name.append(name)\n book_data[\"authorURLs\"] = cur_author_url\n book_data[\"author_names\"] = cur_author_name\n\n # get book rating and review\n book_meta = soupbody.find('div', attrs = {'id':'bookMeta'})\n if book_meta:\n rating = book_meta.find('span',\n attrs = {'itemprop':'ratingValue'}).text.strip()\n book_data[\"rating\"] = rating\n\n book_rating_count_container = book_meta.find('meta',\n attrs = {'itemprop':'ratingCount'})\n if book_rating_count_container:\n book_rating_count = book_rating_count_container['content']\n book_data[\"rating_count\"] = book_rating_count\n\n book_review_count_container = book_meta.find('meta',\n attrs = {'itemprop':'reviewCount'})\n if book_review_count_container:\n book_review_count = book_review_count_container['content']\n book_data[\"review_count\"] = book_review_count\n\n # get book image\n image_tag = soupbody.find('img', attrs = {'id':'coverImage'})\n if image_tag:\n image_src = image_tag['src']\n book_data[\"bookImage\"] = image_src\n # print(authorLink.span.text)\n\n # get related_books\n related_works_container = soupbody.find('div', id=re.compile('relatedWorks-'))\n if related_works_container:\n related_books_div = related_works_container.find('div', class_='bigBoxBody')\n if related_books_div:\n related_books_carousel = related_books_div.find('div', class_='bookCarousel')\n if related_books_carousel:\n carousel_row = related_books_carousel.find('div', class_='carouselRow')\n if carousel_row:\n related_books_list_li = carousel_row.find('ul').find_all('li')\n related_books = []\n for item in related_books_list_li:\n link = item.find('a', href = True)['href']\n self.url_to_explore.add(link)\n related_books.append(link)\n book_data[\"similar_books\"] = related_books\n\n self.data_collection.push_to_collection(book_data)\n print(\"Book successfully scraped: \" + book_title)",
"def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments",
"def get_book(url):\n text = requests.get(url).text\n time.sleep(5) # Delay for 5 seconds\n\n # Transform \\r\\n with \\n newline\n subtext = re.sub(r'\\r\\n', '\\n', text) \n\n # Extract content from START to END\n pat = '\\*\\*\\* START OF THIS PROJECT GUTENBERG EBOOK .* \\*\\*\\*([\\S\\s]*)\\*\\*\\* END OF THIS PROJECT GUTENBERG EBOOK'\n content = re.search(pat, subtext).group(1)\n return content",
"def read_book(url,book_num):\n\t#calls open_url function to open the url\n\tbook_contents = open_url(url)\n\tif book_contents != None:\n\t\t#calls filter data function to clean the data\n\t\tclean_data = filter_data(book_contents)\n\t\t#create dictionary for all the words in this book with 0's filling for count in all the books\n\t\tcreate_dict(clean_data)\n\t\treturn clean_data\n\telse:\n\t\treturn []",
"def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n item['content'] += str(p)\n\n data.append(item)\n\n return data",
"def get_comments(self, isbn, n):\n result = []\n self.cursor.execute(\"\"\"SELECT * FROM comment WHERE ISBN=%s ORDER BY avg_usefulness DESC LIMIT %s\"\"\",\n (str(isbn), n))\n for comment in self.cursor.fetchall():\n result.append(comment)\n return result",
"def comments(self):\n comments_url = self.data['comments_url']\n return json.load(urllib2.urlopen(comments_url))",
"def parse_comments_html(advertise: Dict[str, Any]) -> Optional[List[str]]:\n if \"comments_html\" in advertise.keys():\n\n filtred_comments: str = advertise[\"comments_html\"][200::]\n\n tmp: List[str] = re.split(\"[ \\n\\t]{2,}\", filtred_comments)\n if '' in tmp:\n tmp.remove('')\n\n # Breaking comments\n master: List[List[str]] = []\n tmp_vec: List[str] = []\n for line in tmp:\n\n if re.search(\"de \\d{4,}\", line): # matches 'de 2018' that signals the end of comment\n master.append(tmp_vec)\n tmp_vec = []\n else:\n tmp_vec.append(line)\n\n # Cleaning comments\n for comment in master:\n if \"...\" in comment:\n comment.remove(\"...\")\n if \"O usuário contratou o serviço em\" in comment:\n comment.remove(\"O usuário contratou o serviço em\")\n\n return [\" \".join(m) for m in master]",
"def parse_thrift_books(email):\n tracking_numbers = []\n\n soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')\n elements = soup.find_all('a')\n\n for element in elements:\n link = element.get('href')\n\n if not link:\n continue\n\n if 'spmailtechno' not in link:\n continue\n\n try:\n if re.search(track_copy_pattern, element.text):\n match = re.search(order_number_pattern, email[EMAIL_ATTR_BODY])\n if match:\n tracking_numbers.append({\n 'link': link,\n 'tracking_number': match.group(1)\n })\n except:\n pass\n\n return tracking_numbers",
"def parse_books_from_html(html):\n root = lxml.html.fromstring(html)\n for a in root.cssselect(\"a\"):\n if not 'href' in a.attrib:\n continue\n href = a.attrib['href']\n if href.startswith(\"javascript\"):\n continue\n if not href.startswith(\"http\"):\n href = urljoin(base_url, href)\n book_title = a.text_content()\n\n d = parse_qs(urlparse(href).query)\n if 'M' in d and d['M'][0] in ('book', 'Share'):\n if 'P' in d:\n book_id = d['P'][0]\n book = {'id': book_id,\n 'url': href,\n 'title': book_title}\n save_bookpages(book)",
"def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']",
"async def parse_website_body(body: str) -> None:\n parsed_body = BeautifulSoup(body, 'html.parser')\n tags = parsed_body.find_all('a')\n\n category_pattern: str = r'\\/no\\/categories\\/\\d+'\n product_pattern: str = r'\\/no\\/products\\/\\d+'\n\n categories: list[str] = await find_pattern_in_tags(pattern=category_pattern, tags=tags)\n products: list[str] = await find_pattern_in_tags(pattern=product_pattern, tags=tags)\n\n await filter_categories_and_products(categories=categories, products=products)",
"def make_parsed_comments(self):\n if not hasattr(self, 'separated_comments'):\n self.separated_comments = self.separate_comments()\n \n # build comments list of dictionaries, one dictionary for each article\n self.comments = []\n for self.separated_comment in self.separated_comments:\n try:\n comment_data = self.get_comment_data(self.separated_comment)\n self.comments.append(comment_data)\n except Exception as e:\n pass\n return self.comments",
"def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())",
"def scrap_book_links(category_link):\n # list where the links of the books will be stored\n book_links = []\n\n while True:\n # check to see if url was successfully gotten (if ok response=200,otherwise 404)\n response = requests.get(category_link)\n\n # get the content of the page as html and saves it in an object called page\n page = response.content\n\n # we use BeautifulSoup to parse(converting information into a format that's easier to work with) the html\n soup = BeautifulSoup(page, \"html.parser\")\n\n # in the parsed html all children of the parent article,because this is where all the information we need is\n urls_of_books = soup.find_all('article')\n\n # links are found in the a href\n book_links += [book_rel_url_to_book_abs_url(the_stuff.find('a')['href']) for the_stuff in urls_of_books]\n\n # check whether a next button exists\n if a := soup.select_one(\".next > a\"):\n category_link = remove_last_part_of_url(category_link) + \"/\" + a[\"href\"]\n else:\n break\n return book_links",
"def _parse_reviewers(self, content):\n soup = bs(content, ['fast', 'lxml'])\n table = soup.find('table', {'id': 'productReviews'})\n reviewers = [link['href'] for link in table.findAll('a')\\\n if link.contents == ['See all my reviews']]\n return reviewers",
"def scrap_book_info(book_url):\n response = requests.get(book_url)\n page = response.content\n soup = BeautifulSoup(page, \"html.parser\")\n\n return {\n \"product_page_url\": book_url,\n \"upc\": soup.select_one(\"table tr:nth-child(1) > td\").text,\n \"title\": soup.select_one(\"article div.col-sm-6.product_main > h1\").text,\n \"price_including_tax\": soup.select_one(\"table tr:nth-child(4) > td\").text,\n \"price_excluding_tax\": soup.select_one(\"table tr:nth-child(3) > td\").text,\n \"number_available\": number_only(soup.select_one(\"#content_inner > article > table tr:nth-child(6) > td\").text),\n \"product_description\": soup.select_one(\"article > p\").text,\n \"category\": soup.select_one(\"#default > div > div > ul > li:nth-child(3) > a\").text,\n \"review_rating\": word_to_number(soup.select_one(\".star-rating\")[\"class\"][1]),\n \"image_url\": remove_suffix(soup.select_one(\"#product_gallery img\")[\"src\"]),\n }",
"def _get_review_comments_body(\n self, pull_request_number: int) -> List[Tuple[str, str]]:\n review_comments = get_pull_request_review_comments(\n self._repo_name, pull_request_number, self._auth)\n if not review_comments:\n return []\n review_comments_msg = []\n for comment in review_comments:\n review_comments_msg.append((comment['path'], comment['body']))\n return review_comments_msg",
"def get_all_book(url_book: list, rows: list):\n # loop from book url\n for i in range(len(url_book)):\n book = get_book(url_book[i])\n # write each result to rows\n rows.append(book)",
"def parse_amazon(email):\r\n tracking_numbers = []\r\n \r\n soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')\r\n\r\n # see if it's an shipped order email\r\n order_number_match = re.search('Your AmazonSmile order #(.*?) has shipped', email[EMAIL_ATTR_SUBJECT])\r\n if not order_number_match:\r\n order_number_match = re.search('Your Amazon.com order #(.*?) has shipped', email[EMAIL_ATTR_SUBJECT])\r\n if not order_number_match:\r\n return tracking_numbers\r\n\r\n order_number = order_number_match.group(1)\r\n\r\n # find the link that has 'track package' text\r\n linkElements = soup.find_all('a')\r\n for linkElement in linkElements:\r\n if not re.search(r'track package', linkElement.text, re.IGNORECASE):\r\n continue\r\n \r\n # if found we no get url and check for duplicates\r\n link = linkElement.get('href')\r\n\r\n # make sure we dont have dupes\r\n order_numbers = list(map(lambda x: x['tracking_number'], tracking_numbers))\r\n if order_number not in order_numbers:\r\n tracking_numbers.append({\r\n 'link': link,\r\n 'tracking_number': order_number\r\n })\r\n\r\n return tracking_numbers",
"def get_books_url(url):\n url_array = []\n nbr_pages = get_nbr_of_pages(url) \n if(nbr_pages == None):\n nbr_pages = 1\n formatted_url = split_url(url)\n formatted_url = formatted_url.split('page')\n for i in range(1, int(nbr_pages) + 1):\n if nbr_pages != 1:\n join_url = formatted_url[0] + 'page-' + str(i) + '.html'\n else: \n join_url = url\n response = requests.get(join_url)\n if(response.ok):\n soup = BeautifulSoup(response.text, 'lxml')\n table = soup.find('ol', {'class': 'row'})\n rows = table.find_all('a', href=True)\n for row in rows:\n if row.text:\n url_array.append(\n \"http://books.toscrape.com/catalogue/\" \n + row['href'].strip('../'))\n return url_array",
"def book(args: list, update: Update) -> None:\n\n book_list = []\n if len(args) > 1:\n update.message.reply_text('fetching books, this may take a while...')\n book_list = scrape(' '.join(args[1:]))\n update.message.reply_text(f'found {len(book_list)} books')\n counter = 0\n msg = ''\n\n if len(book_list) > 0:\n for book in book_list:\n msg = msg + f'{book.Title} - {book.Author}\\n'\n counter += 1\n if counter == 5:\n msg = msg + '...'\n break\n update.message.reply_text(msg)\n\n else:\n update.message.reply_text(\n 'please add the name of the book after /book')",
"def search_books_body(collection_of_books: tuple) -> list:\r\n search_tag, search_keyword = search_info()\r\n try:\r\n found_books = search(collection_of_books, search_tag, search_keyword)\r\n except KeyError as err:\r\n print(f\"Invalid input: {err}\\n\"\r\n f\"{err} raised KeyError. Please follow the instruction carefully.\\n\")\r\n else:\r\n print_as_ordered_list(found_books)\r\n\r\n return found_books",
"def _parse_past_documents(self, item):\n doc_list = []\n for doc in item.css('a'):\n doc_list.append({\n 'url': 'http://{}{}'.format(self.allowed_domains[0], doc.attrib['href']),\n 'note': doc.css('*::text').extract_first(),\n })\n return doc_list",
"def get_comments_by_country(pages, hotel, country):\n url = \"http://www.booking.com/reviewlist.es.html\"\n headers = {\n 'User-Agent': \"PostmanRuntime/7.20.1\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"4b4e2c78-12c0-42a7-807a-29f5f7378ae5,e75b58fb-25dd-4fdd-b97a-47650ed52d41\", # NOQA\n 'Host': \"www.booking.com\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"bkng=11UmFuZG9tSVYkc2RlIyh9Yaa29%2F3xUOLbca8KLfxLPeck0I1eO54zQUW2YGGgHUJ6NVSV%2BmLwJzaS5ibHX0J%2BdueF6GNDCq1X0NvEJAU9t%2FoaAC2%2FMBm39Gz0lTSWuf6zuBVIiNGAI88YDjaj4w5H8Lrv7T0Yug9jg%2FpPsONkdMVLMiYifIslIsLvFl07K%2BTKGRykCAxOsgE%3D\", # NOQA\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n params = {\n 'cc1': country,\n 'pagename': hotel,\n 'type': 'total',\n 'dist': str(1),\n 'rows': str(20)\n }\n\n def build_soup_comment_request(page: int, list_of_countries):\n if page == 0:\n params['offset'] = str(page)\n else:\n params['offset'] = str(page * 20)\n\n response = requests.get(url=url, params=params, headers=headers)\n comments_soup = BeautifulSoup(response.content, 'html.parser')\n span = comments_soup.select('.bui-avatar-block__flag img')\n [get_flags(item, list_of_countries) for item in span]\n\n countries_list = {}\n [build_soup_comment_request(page, countries_list) for page in range(pages)]\n return countries_list",
"def each_comment_from_post(post):\n # first yield the post text body, if any\n if post['text']:\n yield post['text']\n # then yield each comment\n for comment in post['comments']:\n yield comment['text']",
"def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()",
"def get_book_infos(url):\n response = requests.get(url)\n if response.status_code == 200:\n # We get the link without the \\..\n link = response.url\n soup = BeautifulSoup(response.content, 'html.parser')\n search_img = soup.find('div', {\"class\": \"item active\"}).find('img')[\"src\"]\n image_link = requests.get(f\"http://books.toscrape.com/{search_img}\").url\n # Product info are in balise tr\n trs = soup.findAll('tr')\n # Stocking the info in a dictionnary\n dict_tr = {}\n for tr in trs:\n th = tr.find('th').text\n td = tr.find('td').text\n dict_tr[th] = td\n # All the informations of the book that we need\n return {'product_page_url': link,\n 'universal_ product_code (upc)': dict_tr['UPC'],\n 'title': soup.find('h1').text,\n 'price_including_tax': dict_tr['Price (incl. tax)'],\n 'price_excluding_tax': dict_tr['Price (excl. tax)'],\n 'number_available': dict_tr['Availability'],\n 'product_description': soup.findAll('meta')[2][\"content\"],\n 'category': soup.findAll('li')[2].find('a').text,\n 'review_rating': soup.findAll('p')[2][\"class\"][1],\n 'image_url': image_link}",
"def parse_page(html):\n\n soup = BeautifulSoup(html, \"html.parser\")\n review_soups = soup.find_all(\"script\", type=\"application/ld+json\")\n\n description_list = []\n for soup in review_soups:\n text = soup.string\n # decode the json into python dict\n js_dict = json.loads(text)\n\n if \"review\" in js_dict:\n review_list = js_dict[\"review\"]\n\n for i in range(len(review_list)):\n review_dict = review_list[i]\n description_list.append(review_dict[\"description\"])\n\n return description_list",
"def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()"
]
| [
"0.56733966",
"0.5616772",
"0.55885935",
"0.5540495",
"0.5528516",
"0.5509193",
"0.5475242",
"0.53941107",
"0.53796166",
"0.5375941",
"0.536291",
"0.53554696",
"0.53521144",
"0.52970135",
"0.5271222",
"0.52644044",
"0.52596533",
"0.5228912",
"0.5222263",
"0.51596844",
"0.51592755",
"0.5104397",
"0.5086352",
"0.5058568",
"0.50527495",
"0.5047238",
"0.5044126",
"0.5043502",
"0.50364643",
"0.5029607"
]
| 0.56806034 | 0 |
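For context, a minimal self-contained sketch of the first record's document above: ISBN_REGEX and permutations are only referenced there, so the regular expression below is an assumption (a plausible pattern for Amazon /dp/ book URLs), not part of the original record.

import re
from itertools import permutations

# Assumed pattern: matches amazon.com product URLs of the form .../dp/<10-character ISBN/ASIN>
ISBN_REGEX = re.compile(r'(amazon\.com/[\w-]*/?dp/)(\d{9}[\dXx])')

def get_comentions(body):
    # Collect the distinct ISBNs mentioned in the comment body ...
    isbns = set(isbn for _, isbn in ISBN_REGEX.findall(body))
    # ... and yield every ordered pair of them (a "comention")
    for isbn1, isbn2 in permutations(isbns, 2):
        yield isbn1, isbn2

body = ("Loved https://www.amazon.com/dp/0306406152 but "
        "https://www.amazon.com/dp/0131103628 is the classic.")
print(list(get_comentions(body)))
# e.g. [('0306406152', '0131103628'), ('0131103628', '0306406152')]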
For each comment in the CSV, count the number of comentions | def count_comentions_csv(csv_reader):
header = next(csv_reader)
index_of = {col: index for index, col in enumerate(header)}
comention_counter = Counter()
for line in csv_reader:
body = line[index_of['body']]
for isbn1, isbn2 in get_comentions(body):
comention_counter[(isbn1, isbn2)] += 1
return comention_counter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _count_comment_rows(vcf_path):\n vcf_lines_generator = lines_from_vcf(vcf_path)\n\n comment_lines_count = 0\n for line in vcf_lines_generator:\n if line.startswith('#'):\n comment_lines_count += 1\n else:\n vcf_lines_generator.close() # Don't leave the file handle opened\n # Don't continue reading the VCF once the comments section ended\n break\n\n return comment_lines_count",
"def countLength():\n counter = 0\n\n with open('bc.processed3.csv', 'r') as openfile:\n for line in openfile:\n counter += 1\n if counter == 1:\n print line\n\n print('Length: ', counter)",
"def count_comments(self):\n return self.run_query(f\"count({self.r}/comment)\")",
"def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1",
"def calc_conv_comments(self):\n for conv_comment in self.pull_request.get_issue_comments():\n self._users.add(conv_comment.user.login)\n lowercase_body = conv_comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_conv_comments += 1\n for reaction in conv_comment.get_reactions():\n self._users.add(reaction.user.login)\n self.conv_comment_reactions += 1\n if conv_comment.body is not None:\n self.len_issue_comments += len(conv_comment.body)",
"def main():\n csv_paths = sorted(glob(os.path.join('amazon', '*.csv')), key=lambda p: p.split('/'))\n with open('comentions.csv', 'w+') as wf:\n csv_writer = csv.writer(wf)\n # counter for comentions across ALL the csvs\n all_comentions_counter = Counter()\n for csv_path in csv_paths:\n with open(csv_path) as rf:\n csv_reader = csv.reader(rf)\n comentions_counter = count_comentions_csv(csv_reader)\n all_comentions_counter += comentions_counter\n for comention, freq in all_comentions_counter.items():\n isbn1, isbn2 = comention\n csv_writer.writerow([isbn1, isbn2, freq])",
"def commentaires(file_name_c):\n com = subprocess.run([\"grep\", \"eleves_bis/\" + file_name_c, \"-e\", '/\\*'], stdout=subprocess.PIPE)\n return com.stdout.decode().count(\"\\n\")",
"def parseCommentsTotalCount(data):\n p = re.compile(r\"\\d+ Reviews\")\n\n for line in data:\n line = line.replace(\",\", \"\")\n match = re.search(p, line)\n if match != None:\n getNmbr = match.group().split(\" \")\n return int(getNmbr[0])\n return -1",
"def _get_num_lines_from_csv(self, filename):\n\n _file = open(get_full_path(filename))\n _reader = csv.reader(_file)\n\n return len(list(_reader))",
"def getNumberOfReviews(self):\n try:\n count = 0\n with open(self.metadata_path, \"r\", newline='') as metadata:\n mdata = csv.reader(metadata, delimiter=' ', quotechar='|')\n for review_data in mdata:\n count += 1\n return count\n except Exception:\n print(\"Cant load metadata file\")\n traceback.print_exc()",
"def getCommentCount(self, source):\n commentStart = source.find('item?id=')\n commentCountStart = source.find('>', commentStart) + 1\n commentEnd = source.find('</a>', commentStart)\n commentCountString = source[commentCountStart:commentEnd]\n if commentCountString == \"discuss\":\n return 0\n elif commentCountString == \"\":\n return 0\n else:\n commentCountString = commentCountString.split(' ')[0]\n return int(commentCountString)",
"def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)",
"def count_commas(txt):\n \n count = 0\n for c in txt:\n if c == ',':\n count += 1\n return count",
"def count_relation_doc(document):\n count = {}\n for line in document[1:]:\n _, _, _, relation_types, _ = conll04_parser.split_line(line)\n for relation in relation_types:\n if relation in count:\n count[relation] += 1\n else:\n count[relation] = 1\n return count",
"def test_graph_histogram_of_sentiment_scores_all_comments():\n graph_histogram_of_sentiment_scores_all_comments('politics_30_months_comments_cleaned_standardized_vader_flair.csv')",
"def sent_count(comment):\n return comment.__len__()",
"def get_rec_count(files: List[str],\n dialect: csv.Dialect) -> Tuple[Optional[int], int]:\n rec_cnt = -1\n for _ in csv.reader(fileinput.input(files), dialect):\n rec_cnt += 1\n fileinput.close()\n return rec_cnt",
"def cleanCsv(): \n\n count_neutral = 0\n count_sad = 0\n count_angry = 0\n count_happy = 0\n\n count_session_neutral = 0 \n\n for column_values in raw_data:\n\n if significant_data.fieldnames is None:\n dh = dict((h, h) for h in raw_data.fieldnames)\n significant_data.fieldnames = raw_data.fieldnames\n significant_data.writerow(dh)\n\n if column_values['AOI[Sad_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Left]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Right]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Left]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Right]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Sad_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Right]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Left]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n return {\n 'count_neutral': count_neutral,\n 'count_sad': count_sad,\n 'count_angry': count_angry,\n 'count_happy': count_happy,\n }",
"def analyze(filename):\r\n start = datetime.datetime.now()\r\n\r\n ao_count = 0\r\n\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\r\n year_count = {\r\n \"2013\": 0,\r\n \"2014\": 0,\r\n \"2015\": 0,\r\n \"2016\": 0,\r\n \"2017\": 0,\r\n \"2018\": 0\r\n }\r\n for row in reader:\r\n l_row = list(row)\r\n print(f\"\\n{row}\")\r\n year = l_row[5][6:]\r\n if year in year_count.keys():\r\n year_count[year] += 1\r\n if \"ao\" in l_row[6]:\r\n ao_count += 1\r\n\r\n end = datetime.datetime.now()\r\n return start, end, year_count, ao_count",
"def comment_count(self):\n return self.comments.filter_by(state=0).count()",
"def processData(content):\n\n csv_file = csv.reader(content)\n line_count = 0\n image_count = 0\n hour_count = 0\n\n chrome = ['Google Chrome', 0]\n explorer = ['Internet Explorer', 0]\n mozilla = ['Firefox', 0]\n safari = ['Safari', 0]\n \n for line in csv_file:\n line_count += 1\n if re.search(\"firefox\", line[2], re.I):\n mozilla[1] += 1\n elif re.search(r\"MSIE\", line[2]):\n explorer[1] += 1\n elif re.search(r\"Chrome\", line[2]):\n chrome[1] += 1\n elif re.search(r\"Safari\", line[2]):\n safari[1] += 1\n if re.search(r\"jpe?g|JPE?G|png|PNG|gif|GIF\", line[0]):\n image_count += 1\n\n image_percentage = (float(image_count) / line_count) * 100\n\n browser_count = [chrome, explorer, mozilla, safari]\n\n browser_popularity = 0\n top_browser = ' '\n for b in browser_count:\n if b[1] > browser_popularity:\n browser_popularity = b[1]\n top_browser = b[0]\n else:\n continue\n\n message1 = ('There were {:,} total page hits today.').format(line_count)\n message2 = ('Hits on images accounted for {}% of all hits.').format(image_percentage)\n message3 = ('{} had the most hits with {:,}.').format(top_browser, browser_popularity)\n\n print message1\n print message2\n print message3",
"def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri",
"def count_semi_colons(txt):\n \n count = 0\n for c in txt:\n if c == ';':\n count += 1\n return count",
"def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count",
"def countCoOccurences(line):\r\n elements = line.split()\r\n return (int(elements[0]), len(elements) - 1)",
"def count_distinct_psms(csv_file_path=None, psm_defining_colnames=None):\n\n psm_counter = Counter()\n with open(csv_file_path, \"r\") as in_file:\n csv_input = csv.DictReader(in_file)\n output_fieldnames = list(csv_input.fieldnames)\n for line_dict in csv_input:\n psm = tuple(\n [line_dict[x] for x in psm_defining_colnames if x in line_dict.keys()]\n )\n psm_counter[psm] += 1\n\n return psm_counter",
"def count_elements(path):\n count = 0\n with open(path, 'r') as f:\n groups = f.read().split('\\n\\n')\n for idx in range(len(groups)):\n word = groups[idx].split('\\n')\n no_of_ele = len(word)\n for i in range(no_of_ele-1):\n word[0] = word[0]+word[i+1]\n count += len(''.join(set(word[0])))\n return count",
"def count_reddit_comments_ngram_strs(year, month, n):\n ngram_strs = ngram_extract.extract_reddit_comments_ngram_strs(year, month, n)\n return Counter(itertools.chain.from_iterable(ngram_strs))",
"def count_comments(self):\n comments = YoutubeComment.query.outerjoin((QueryVideoMM, QueryVideoMM.video_id == YoutubeComment.video_id)).filter_by(youtube_query_id=self.id)\n count = comments.count()\n return count",
"def SentenceSplitsCommas(f):\n\tcounter=0\n\twith open(filename) as f:\n\t\tread = csv.reader(f)\n\t\tfor row in read:\n\t\t\t#Original\n\t\t\tzin0=row[0]\n\t\t\t#Human Translation\n\t\t\tzin1=row[1]\n\t\t\t#Machine Translation\n\t\t\tzin2=row[2]\n\t\t\tcounter+=1\n\t\t\t#COMMAS \n\t\t\t#print((abs(zin0.count(',') - zin1.count(','))))\n\t\t\tprint((abs(zin0.count(',') - zin2.count(','))))"
]
| [
"0.6550013",
"0.651329",
"0.63195485",
"0.61350703",
"0.6074047",
"0.60031706",
"0.600072",
"0.59598917",
"0.58645254",
"0.58162385",
"0.5778962",
"0.5714064",
"0.5712255",
"0.5708559",
"0.56961226",
"0.56937104",
"0.56761754",
"0.56676346",
"0.565836",
"0.5652044",
"0.56494296",
"0.5646312",
"0.5622869",
"0.56032103",
"0.5579511",
"0.55479306",
"0.5545643",
"0.55367917",
"0.55026466",
"0.5493784"
]
| 0.7625474 | 0 |
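The second record's document relies on a header-to-index mapping so that rows from a plain csv.reader can be addressed by column name. A small self-contained illustration of that pattern (counting words instead of comentions, so it runs without the ISBN helpers assumed earlier):

import csv
import io
from collections import Counter

sample = io.StringIO(
    "id,author,body\n"
    "1,alice,first comment\n"
    "2,bob,second comment\n"
)
reader = csv.reader(sample)
header = next(reader)
index_of = {col: index for index, col in enumerate(header)}  # same trick as count_comentions_csv

word_counter = Counter()
for row in reader:
    body = row[index_of['body']]  # look up the field by column name
    word_counter.update(body.split())
print(word_counter)
# e.g. Counter({'comment': 2, 'first': 1, 'second': 1})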
Reads in all the comment CSVs in `csv_directory` and builds a CSV containing all the comention frequencies | def main():
csv_paths = sorted(glob(os.path.join('amazon', '*.csv')), key=lambda p: p.split('/'))
with open('comentions.csv', 'w+') as wf:
csv_writer = csv.writer(wf)
# counter for comentions across ALL the csvs
all_comentions_counter = Counter()
for csv_path in csv_paths:
with open(csv_path) as rf:
csv_reader = csv.reader(rf)
comentions_counter = count_comentions_csv(csv_reader)
all_comentions_counter += comentions_counter
for comention, freq in all_comentions_counter.items():
isbn1, isbn2 = comention
csv_writer.writerow([isbn1, isbn2, freq]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def concat_file(filename):\n csv_paths = read_csv(filename)\n\n data_len = 0\n df_total = None\n for csv_name, csv_path in tqdm(csv_paths):\n print(csv_name)\n df = dt.fread(csv_path).to_pandas()\n data_len += df.shape[0]\n\n process_df = filter_law(df)\n\n if df_total is None:\n df_total = process_df.copy()\n else:\n df_total = pd.concat([df_total, process_df], ignore_index=True)\n\n print(\"Total data count: {}\".format(data_len))\n df_total.to_csv('eda_concat.csv')",
"def count_comentions_csv(csv_reader):\n header = next(csv_reader)\n index_of = {col: index for index, col in enumerate(header)}\n comention_counter = Counter()\n for line in csv_reader:\n body = line[index_of['body']]\n for isbn1, isbn2 in get_comentions(body):\n comention_counter[(isbn1, isbn2)] += 1\n return comention_counter",
"def read_csv(csv_folder, split, segment_limit, sentence_limit, word_limit):\n assert split in {'train', 'test'}\n\n docs = []\n labels = []\n word_counter = Counter()\n data = pd.read_csv(os.path.join(csv_folder, \"short_concat_\" + split + '.csv'), header=None)\n for i in tqdm(range(data.shape[0])):\n # 전체 문서\n row = list(data.loc[i, :])\n segments = list()\n text = row[0]\n\n # 각 문단을 문장 단위로 잘라 저장\n for paragraph in preprocess(text).splitlines():\n segments.append([s for s in sent_tokenizer.tokenize(paragraph)])\n\n # 단어 단위로 토크나이징\n sentences = list()\n\n for paragraph in segments[:segment_limit]:\n words = list()\n for s in paragraph[:sentence_limit]:\n w = word_tokenizer.tokenize(s)[:word_limit]\n # If sentence is empty (due to removing punctuation, digits, etc.)\n if len(w) == 0:\n continue\n words.append(w)\n word_counter.update(w)\n sentences.append(words)\n # If all sentences were empty\n if len(words) == 0:\n continue\n\n labels.append(int(row[1])) # since labels are 1-indexed in the CSV\n docs.append(sentences)\n\n return docs, labels, word_counter",
"def main():\n glob_pattern = \"{root}/{child}/*.xml\".format(root=MANCHESTER_ROOT, child=TARGET_CHILD)\n corpus_files = glob(glob_pattern)\n for filename in corpus_files:\n print(filename)\n to_csv(filtered_parent_freq_count([filename], 2))",
"def process_file(filename):\n csv_paths = read_csv(filename)\n\n all_columns = {\n 'court', 'date', 'no', 'sys', 'reason', 'judgement', 'attachAsJudgement', 'attachments', 'type', 'historyHash', 'mainText', 'opinion', 'relatedIssues', 'party'\n }\n unused_columns = ['attachAsJudgement', 'attachments']\n\n data_len = 0\n for csv_name, csv_path in tqdm(csv_paths):\n print(csv_name)\n processed_name = 'no_text_data/' + csv_name\n df = dt.fread(csv_path)\n\n # remove unused columns\n if 'attachAsJudgement' in df.keys():\n del df[:, unused_columns]\n else:\n del df[:, 'attachments']\n\n # remove all text\n del df[:, ['opinion']]\n\n data_len += df.shape[0]\n df.to_csv(processed_name)\n print(\"Total data count: {}\".format(data_len))",
"def collect_stats(csv_file, skip_header, split_index, iterations, dir):\n matchings = [{'desc': 'max_card',\n 'algo': matching_algos.max_card_hospital_residents,\n 'file': lambda dir, iteration: '{}/max_card{}.txt'.format(dir, iteration)},\n {'desc': 'stable',\n 'algo': matching_algos.stable_matching_hospital_residents,\n 'file': lambda dir, iteration: '{}/stable{}.txt'.format(dir, iteration)},\n {'desc': 'popular',\n 'algo': matching_algos.popular_matching_hospital_residents,\n 'file': lambda dir, iteration: '{}/popular{}.txt'.format(dir, iteration)}]\n\n with open(csv_file, encoding='utf-8', mode='r') as fin:\n G = read_course_allotment_graph(fin, skip_header, split_index) # read the graph just once\n\n def G_fn():\n return random_sample(G)\n matching_stats.collect_stats(G_fn, iterations, dir, matchings)",
"def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)",
"def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)",
"def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()",
"def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)",
"def read_csv():",
"def count_distinct_psms(csv_file_path=None, psm_defining_colnames=None):\n\n psm_counter = Counter()\n with open(csv_file_path, \"r\") as in_file:\n csv_input = csv.DictReader(in_file)\n output_fieldnames = list(csv_input.fieldnames)\n for line_dict in csv_input:\n psm = tuple(\n [line_dict[x] for x in psm_defining_colnames if x in line_dict.keys()]\n )\n psm_counter[psm] += 1\n\n return psm_counter",
"def aggregate_counts(counts_files,\n output_file = '/dev/stdout', \n sample_names=None, \n sep=\"\\t\", \n header=0, \n comment=\"#\"):\n sample_pos = -1\n \n if sample_names is not None:\n if len(sample_names)!=len(counts_files):\n logging.error(\"Number of sample names is not the same length as \",\n \"the number of counts files.\")\n raise RuntimeError(\"\")\n\n # read in all counts files\n counts_df = [pd.read_csv(file, sep=sep, header=header, comment=comment) \n for file in counts_files]\n\n # overwrite the sample names if provided\n if sample_names:\n for i, df in enumerate(counts_df):\n #counts_df[i].columns[sample_pos] = sample_names[i]\n new_columns = df.columns.tolist()\n new_columns[sample_pos] = sample_names[i]\n df.columns = new_columns\n else:\n # check sample names are all different\n sample_names_from_files = [df.columns[sample_pos] for df in counts_df]\n\n if (len(set(sample_names_from_files))<len(counts_files)):\n logging.error(\"Sample names in counts files are not unique. Fix \",\n \"or provide a list of sample names to use.\")\n raise RunTimeError()\n\n\n # merge the dataframes together\n merged_df = reduce(lambda x, y: pd.merge(x,y), counts_df)\n\n\n # output\n if header is not None:\n out_header = True\n\n with open(output_file, 'w') as handle:\n merged_df.to_csv(handle, sep=sep, header=out_header, index=False)\n\n return 0",
"def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()",
"def write_analysis_details(self, csvfile):\n #filepath, total words, line count, most common word\n f = open(csvfile, 'w')\n most_common = self.most_common()\n f.write('filepath,total words,line count,most common word\\n')\n f.write(f'{self.filepath},{self.word_count()},{self.sentence_count()},{self.most_common()[0]}')\n f.close()",
"def main():\n csvFile = open('college.csv', 'w', newline='')\n csvWriter = create_csvWriter(csvFile)\n input_files_names = os.listdir(DATADIR)\n print(\"starting\")\n for file_name in input_files_names:\n if file_name.endswith(\".html\"):\n print(\".\", end=\"\", flush=True)\n soup = create_soup(file_name)\n name = college_names(soup)\n if name is None:\n continue\n city, state, zipcode = college_location(soup)\n selectivity = competitiveness(soup)\n\n csvWriter.writerow([name, city, state, zipcode, selectivity, file_name])\n print(\"Finished\")\n csvFile.close()",
"def generateNewsDocsCSV():\n docs = None\n for d in range(7):\n # import data file\n try:\n if d == 0:\n docs = pd.read_csv(\"documents/news_documents\" + str(d) + \".csv\")\n else:\n docs = docs.append(pd.read_csv(\"documents/news_documents\" + str(d) + \".csv\"), ignore_index=True)\n except FileNotFoundError:\n print(\"File not found\")\n\n docs.dropna(inplace=True)\n\n docs['bodylen'] = docs['body'].str.len()\n docs['shortlen'] = docs['short_description'].str.len()\n docs.drop(docs[docs['bodylen'] < docs['shortlen']].index, inplace=True)\n docs = docs.drop(columns=['bodylen', 'shortlen'])\n\n docs.reset_index(drop=True, inplace=True)\n docs['body'] = docs['body'].replace('\\n', ' ', regex=True)\n docs['body'] = docs['body'].replace('\\t', ' ', regex=True)\n docs['body'] = docs['body'].replace('\\r', ' ', regex=True)\n docs['short_description'] = docs['short_description'].replace('\\n', ' ', regex=True)\n docs['short_description'] = docs['short_description'].replace('\\t', ' ', regex=True)\n docs['short_description'] = docs['short_description'].replace('\\r', ' ', regex=True)\n\n docs.loc[27653]['body'] = docs.loc[27653]['body'].replace('PHOTO GALLERY', '')\n pgIdx = docs[docs['body'].str.contains(\"PHOTO GALLERY\")].index\n c = 0\n for i in pgIdx:\n docs.loc[i]['body'] = docs.loc[i]['body'].split('PHOTO GALLERY', 1)[0]\n c += 1\n\n docs.to_csv('documents/full_news_documents.csv', index=True)",
"def pre_process_reviews(csv, outputname):\n df = pd.read_csv(csv)\n df = df.drop(\"Unnamed: 0\", axis='columns')\n df.to_csv(outputname, index=False)",
"def get_tweets_ids_from_csv(self):\n for fname in self.filelist:\n with open(fname) as f:\n fakenews = csv.reader(f)\n next(fakenews) # Discard top CSV row\n for fake in fakenews:\n yield fake[3].split(\"\t\")",
"def generateStatisticsForStep3():\n os.chdir( config.CORPUS_DIRECTORY )\n invertedIndex = {}\n with open('Step3.csv') as f:\n fileList = csv.reader( f )\n for entry in fileList:\n postingList = list(ast.literal_eval(entry[2]))\n invertedIndex[(entry[0], int(entry[1]))]= [ int(x) for x in postingList]\n generateFreqGraph( invertedIndex )\n generatePostingGapGraph( invertedIndex )",
"def main():\n with open('csv_files/products.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" description{}\".format(str(i)),\n \" type{}\".format(str(i)),\n \" {}\".format(str(random.randint(1, 100)))])\n\n with open('csv_files/customers.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" first_name{}\".format(str(i)),\n \" last_name{}\".format(str(i)),\n \" address{}\".format(str(i)),\n \" phone_number{}\".format(str(i)),\n \" email{}\".format(str(i))])",
"def purify_comments(csv_file, keep_stops=False, POS=False, lemmatize=False, popular=0):\r\n\r\n df = pd.read_csv(csv_file)\r\n df = df.loc[df[\"author\"] != \"[deleted]\"] # trim out comments whose authors have deleted their accounts\r\n df = df.loc[df[\"score\"] != \"score\"] # this is an error in the code when building new csv_files from dask\r\n\r\n # extracts only the popular comments\r\n if popular > 0:\r\n df = df.loc[pd.to_numeric(df[\"score\"]) > popular]\r\n\r\n comments = df[\"body\"]\r\n del df # no need for this anymore, and it'll merely eat up memory\r\n\r\n nlp = en_core_web_sm.load()\r\n\r\n revised_comments = []\r\n for comment in comments.astype('unicode').values:\r\n comment = comment[1:] # remove the initial 'b' bytes-representation character\r\n comment = comment.encode(\"utf-8-sig\").decode(\"utf-8-sig\") # get rid of BOM character\r\n comment = comment.lower().replace(r\"\\n\", r\"\").replace(r'\"', r'')\r\n\r\n tokens = nlp(comment)\r\n\r\n # actual specification section\r\n for sent in tokens.sents:\r\n\r\n if POS: # conversion of comments to tokens/lemmas-POS tags\r\n if lemmatize:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n else:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n elif lemmatize: # just lemmatization\r\n if keep_stops:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n else: # nothing but removal of stop words (or not)\r\n if keep_stops:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n revised_comments.append(\" \".join(revised_tokens))\r\n\r\n return pd.Series(revised_comments)",
"def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP",
"def import_csv(directory_name, collection_file, database):\n LOGGER.debug('Importing %s CSV file...', collection_file)\n count = 0\n errors = 0\n try:\n filename = f'{collection_file}.csv'\n collection = database[collection_file]\n with open(os.path.join(directory_name, filename)) as file:\n collection.insert_many(data_convert(csv.DictReader(file)))\n count = collection.count_documents({})\n except OSError as err:\n print(f'OS error: {err}')\n LOGGER.error('Error reading %s file: %s', collection_file, err)\n errors = 1\n\n return count, errors",
"def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... \\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... \\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'",
"def generate_csv(allstats, dirname):\n for type in CATEGORIES.keys():\n filename = os.path.join(dirname, f\"stats_{type}.csv.gz\")\n stats = allstats[type]\n with gzip.open(filename, 'wt') as handle:\n writer = csv.writer(handle)\n writer.writerow([\"year\", CATEGORIES[type], \"all\", \"ano\", \"ident\", \"inclass\", \"teacher\"])\n for year in sorted(stats.keys()):\n ystats = stats[year]\n for val in sorted(ystats.keys()):\n row = [year, val] + ystats[val]\n writer.writerow(row)",
"def loop_csv(input_csv_path, output_csv_path):\n counter = 0\n with open(input_csv_path, 'rb') as read_csvfile:\n projectsreader = csv.DictReader(\n read_csvfile, delimiter=',', quotechar='\"')\n\n with open(output_csv_path, 'w') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl', 'foundProjectUrl1',\n 'foundProjectUrl2', 'foundProjectUrl3',\n 'foundProjectUrl4', 'foundProjectUrl5',\n 'foundProjectUrl6', 'foundProjectUrl7',\n 'foundProjectUrl8', 'foundProjectUrl9',\n 'foundProjectUrl10']\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n # writer.writeheader() # this method only available at python 2.7\n for row in projectsreader:\n if counter == 100:\n time.sleep(86400) # sleep 1 day\n counter = 0\n\n res = query_google_cse(\n row['acronym'] + \" \" + row['title'] +\n \" project -site:cordis.europa.eu -site:ec.europa.eu\")\n\n # save response to file\n with open('responses_gcse.json', 'w') as outfile:\n json.dump(res, outfile)\n\n # a query response may not have 10 results, so we have to check\n # for that\n results = []\n result_size = res['queries']['request'][0]['totalResults']\n\n print \"INFO: RESULT SIZE %s\" % result_size\n for i in range(10):\n if i < int(result_size):\n results.append(res['items'][i]['link'])\n else:\n results.append('')\n\n # print \"Control Print: \" + res['items'][0]['link']\n print \"INFO: First Result: \" + results[0]\n writer.writerow({\n 'acronym': row['acronym'],\n 'title': row['title'],\n 'projectUrl': row['projectUrl'],\n 'foundProjectUrl1': results[0],\n 'foundProjectUrl2': results[1],\n 'foundProjectUrl3': results[2],\n 'foundProjectUrl4': results[3],\n 'foundProjectUrl5': results[4],\n 'foundProjectUrl6': results[5],\n 'foundProjectUrl7': results[6],\n 'foundProjectUrl8': results[7],\n 'foundProjectUrl9': results[8],\n 'foundProjectUrl10': results[9],\n })\n sys.stdout.flush()\n time.sleep(2)\n counter += 1",
"def _enumerate_csv(self, csv_input):\n csv_file = open(csv_input, 'rb') \n csv_reader = csv.reader(csv_file)\n next(csv_reader, None)\n for row in reader:\n yield row",
"def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File read error')\r\n\r\n\r\n print ('{} file finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))",
"def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))"
]
| [
"0.6203551",
"0.61001015",
"0.59032845",
"0.5900873",
"0.58855546",
"0.577408",
"0.5725831",
"0.5725831",
"0.5722023",
"0.5682996",
"0.5677519",
"0.5640583",
"0.5640177",
"0.5633452",
"0.55537605",
"0.5501322",
"0.5458168",
"0.5458156",
"0.5456997",
"0.54544693",
"0.54528195",
"0.5422867",
"0.5409898",
"0.54082954",
"0.5404268",
"0.5402929",
"0.5402121",
"0.5402056",
"0.5392816",
"0.53736484"
]
| 0.69587445 | 0 |
This function accepts the column number for the features (X) and the target (y). It chunks the data up with a rolling window of X(t - window) to predict X(t). It returns two numpy arrays of X and y. | def get_window_data(symbol_signals_df, window_size, feature_col_number, target_col_number):
X = []
y = []
for i in range(len(symbol_signals_df) - window_size):
features = symbol_signals_df.iloc[i : (i + window_size), feature_col_number]
#print(features)
target = symbol_signals_df.iloc[(i + window_size), target_col_number]
X.append(features)
y.append(target)
return np.array(X), np.array(y).reshape(-1, 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def window_data_proc(X, y=None, delta=1):\n vectorize_windowing = lambda row: train_sample_windowize(row.reshape(20, 20), delta=delta)\n\n X = np.vstack(np.apply_along_axis(vectorize_windowing, 1, X))\n if y is not None:\n y = np.vstack(y.ravel())\n return X, y\n return X",
"def create_my_dataset(df, predict_window):\n\n x = []\n y = []\n for i in range(predict_window, df.shape[0]):\n x.append(df[i-predict_window:i,0])\n y.append(df[i,0])\n #convert data to numpy array\n x = np.array(x)\n y = np.array(y)\n \n return x,y",
"def window_data(datax, datay, window_length, hop_size, sample_rate, test_size):\n sample_window_length = int(np.floor(window_length * sample_rate))\n sample_hop_size = int(np.floor(hop_size * sample_rate))\n\n X_train = np.empty((0, sample_window_length))\n X_test = np.empty((0, sample_window_length))\n y_train = np.array([])\n y_test = np.array([])\n\n for (index, row) in datax.items():\n sys.stdout.write(f\"\\r[-] Reading: {index} of {len(datax)} ({index / len(datax) * 100: .2f}%)\")\n sys.stdout.flush()\n\n windowed_row = np.empty((0, sample_window_length))\n target_row = np.array([])\n\n for start_pos in np.arange(0, len(row)-sample_window_length, sample_hop_size):\n window = datax.loc[index][start_pos:start_pos + sample_window_length]\n\n windowed_row = np.vstack((windowed_row, window))\n target_row = np.append(target_row, datay[index])\n\n midpoint = int(np.floor(len(windowed_row) * (1-test_size)))\n\n X_train = np.vstack((X_train, windowed_row[:midpoint]))\n X_test = np.vstack((X_test, windowed_row[midpoint:]))\n y_train = np.append(y_train, target_row[:midpoint])\n y_test = np.append(y_test, target_row[midpoint:])\n\n return X_train, X_test, y_train, y_test",
"def rolling_window_sequences(X, index, window_size, target_size, target_column):\n out_X = list()\n out_y = list()\n X_index = list()\n y_index = list()\n\n target = X[:, target_column]\n\n for start in range(len(X) - window_size - target_size + 1):\n end = start + window_size\n out_X.append(X[start:end])\n out_y.append(target[end:end + target_size])\n X_index.append(index[start])\n y_index.append(index[end])\n\n return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)",
"def get_sequence_to_sequence_data(series):\n Y = np.empty((INITIAL_BATCH_SIZE,N_INPUT_STEPS,N_PREDICTIONS,N_OUTPUT_FEATURES))\n # ^ At every one of our input time steps, we will predict the following N_PREDICTIONS time steps.\n for step_ahead in range(1,N_PREDICTIONS+1):\n Y[:,:,step_ahead-1,:N_OUTPUT_FEATURES] = series[:,step_ahead:step_ahead+N_INPUT_STEPS,:N_OUTPUT_FEATURES]\n Y = Y.reshape(INITIAL_BATCH_SIZE,N_INPUT_STEPS,N_PREDICTIONS*N_OUTPUT_FEATURES)\n return Y",
"def sliding_window_main(x, y, index=None, predict_ahead=predict_ahead):\n x_slid, y_slid, idx_slid = to_sliding_window(\n x, y, timesteps,\n predict_ahead,\n index=index)\n\n reshape_2 = lambda y: np.reshape(y, (y.shape[0], n))\n y_slid = reshape_2(y_slid)\n\n\n print(\"x_slid.shape:\", x_slid.shape)\n print(\"y_slid.shape:\", y_slid.shape)\n\n if index is not None:\n return x_slid, y_slid, idx_slid\n\n return x_slid, y_slid",
"def to_sliding_window(x, y, timesteps, predict_ahead, index=None):\n\n xnew = sliding_window(x, timesteps, predict_ahead)\n #print(xnew)\n ynew = y[timesteps+predict_ahead:]\n\n if index is not None:\n idxnew = index[timesteps+predict_ahead:]\n return xnew, ynew, idxnew\n\n return xnew, ynew, None",
"def _data_transformation(adjclose_array, window = 30):\n X_data = []\n y_data = [] # Price on next day\n window = window\n num_shape = len(adjclose_array)\n\n for i in range(window, num_shape):\n X_data_reshaped = np.reshape(adjclose_array[i-window:i], (window, 1))\n X_data.append(X_data_reshaped)\n X_data = np.stack(X_data)\n y_data = np.stack(adjclose_array)[window:]\n return X_data, y_data",
"def samples_timesteps_features(dataframe, columns, start_date, timesteps=72, \n steps_ahead=24, window_days=100, train_percent=80.):\n \n def overlap_windows(dataset, timesteps, steps_ahead):\n \"\"\" Create overlaping window of time-series data\n \n Parameters\n ----------\n dataset: pd.DataFrame\n time-series pandas dataset\n timesteps: int\n number of time steps from the past for creating output arrays\n steps_ahead: int\n number of time steps into the future for making predictions\n \n Returns\n -------\n X, y: np.array\n input and output 3-d arrays of overlaping time windows\n \"\"\"\n X = []; y = []\n \n start = 0\n for i in range(len(dataset)):\n # Define the end of the input sequence\n in_end = start + timesteps\n out_end = in_end + steps_ahead\n # Ensure that there is enough data\n if out_end <= len(dataset):\n X.append(dataset[start:in_end, :])\n # First column holds load values\n y.append(dataset[in_end:out_end, 0])\n # Move along one time step\n start += 1\n \n # Convert list to np.array\n X = np.asarray(X)\n y = np.asarray(y)\n \n return X, y\n\n\n data = dataframe.copy()\n \n if window_days*24 > data.values.shape[0]:\n raise ValueError('Variable window_days has too large value: {}*24h = {} > {}, which is more than there is data!'.format(window_days, window_days*24, \n data.values.shape[0]))\n \n # Training period\n # ---------------\n train_percent = train_percent/100.\n st = pd.to_datetime(start_date) # start date\n et = st + dt.timedelta(days=int(train_percent*window_days)) # end date\n train = data.loc[st:et].values\n \n # Standardize and transform training data set\n mean_std_values = {}\n for i, column in enumerate(columns):\n # Calculate mean and standard deviation only\n # from the training data set values\n mu = train[:,i].mean() # axis=0\n sd = train[:,i].std()\n mean_std_values[column] = (mu, sd)\n # Standardize training data\n train[:,i] = (train[:,i] - mu)/sd\n \n # Create overlapping windows with training data\n X_train, y_train = overlap_windows(train, timesteps, steps_ahead)\n \n # Testing / Validation period\n # ---------------------------\n sv = et \n ev = sv + dt.timedelta(days=int((1-train_percent)*window_days)+1)\n test = data.loc[sv:ev].values\n \n # Transform testing/validation data set\n for i, column in enumerate(columns):\n # Use mean and standard deviation from the\n # training data set\n mu = mean_std_values[column][0]\n sd = mean_std_values[column][1]\n # Standardize test data\n test[:,i] = (test[:,i] - mu)/sd\n \n # Create overlaping windows with test data\n X_test, y_test = overlap_windows(test, timesteps, steps_ahead)\n \n return mean_std_values, X_train, y_train, X_test, y_test",
"def _transform(self, X, y=None):\n # Check input of feature calculators, i.e list of functions to be\n # applied to time-series\n features = _check_features(self.features)\n X = convert_to(X, \"numpy3D\")\n\n # Check that the input is of the same shape as the one passed\n # during fit.\n if X.shape[1] != self.input_shape_[1]:\n raise ValueError(\n \"Number of columns of input is different from what was seen in `fit`\"\n )\n # Input validation\n # if not all([np.array_equal(fit_idx, trans_idx) for trans_idx,\n # fit_idx in zip(check_equal_index(X),\n # raise ValueError('Indexes of input time-series are different\n # from what was seen in `fit`')\n\n n_instances, _, _ = X.shape\n n_features = len(features)\n\n intervals = self.intervals_\n n_intervals = len(intervals)\n\n # Compute features on intervals.\n Xt = np.zeros((n_instances, n_features * n_intervals)) # Allocate output array\n # for transformed data\n columns = []\n\n i = 0\n drop_list = []\n for func in features:\n # TODO generalise to series-to-series functions and function kwargs\n for start, end in intervals:\n interval = X[:, :, start:end]\n\n # Try to use optimised computations over axis if possible,\n # otherwise iterate over rows.\n try:\n Xt[:, i] = func(interval, axis=-1).squeeze()\n except TypeError as e:\n if (\n str(e) == f\"{func.__name__}() got an unexpected \"\n f\"keyword argument 'axis'\"\n ):\n Xt[:, i] = np.apply_along_axis(\n func, axis=2, arr=interval\n ).squeeze()\n else:\n raise\n new_col_name = f\"{start}_{end}_{func.__name__}\"\n if new_col_name in columns:\n drop_list += [i]\n else:\n columns = columns + [new_col_name]\n i += 1\n\n Xt = pd.DataFrame(Xt)\n Xt = Xt.drop(columns=Xt.columns[drop_list])\n Xt.columns = columns\n\n return Xt",
"def reshape_data(X, y):\n\n X_reshaped = X.reshape(-1, X.shape[-1])\n y_reshaped = y.reshape(-1)\n\n return X_reshaped, y_reshaped",
"def create_sequences(X, y, time_steps=15, window=1):\n Xs, ys = [], []\n for i in range(0, len(X) - time_steps + 1, window):\n Xs.append(X.iloc[i : (i + time_steps)].values)\n ys.append(y.iloc[i + time_steps - 1])\n\n X_array = np.array(Xs)\n y_array = np.array(ys)\n\n X_array = np.reshape(X_array, (X_array.shape[0], X_array.shape[1], 1))\n\n return X_array, y_array",
"def predict(self,X):\n result = []\n for i in range(pd.DataFrame(X).shape[0]):\n prediction = self.predict_single(pd.DataFrame(X).iloc[i])\n result.append(prediction)\n print(result)\n return np.array(result)",
"def preprocess(dataframe_csvpath, cols_x, cols_y, window_in, window_out, data_div_frac, popu_size):\n \n #Loading .CSV file and creating dataframe\n df = pd.read_csv(dataframe_csvpath) \n len_ser = len(df[df['Series_No'] == 1])\n\n #randomly shuffle different series\n permute = np.random.permutation(range(1, len(set(df['Series_No']))))\n train_series_seq = permute[: int(len(set(df['Series_No'])) * data_div_frac)]\n test_series_seq = permute[int( len(set(df['Series_No'])) * data_div_frac):]\n \n #taking relevent columns from dataframe \n df_x = df[cols_x]\n df_y = df[cols_y]\n \n #Innitialize empty lists which are later to be appended\n train_seq, test_seq = [], []\n x_test = []\n y_true =[]\n \n #Creating time series data\n for series_no in train_series_seq:\n \n #new dataframe variable assignment for particular series drom df_x, df_y\n series_df_x = df_x[df_x['Series_No'] == series_no]\n series_df_y = df_x[df_y['Series_No'] == series_no]\n \n #converting into numpy arrays\n array_x = np.array(series_df_x)\n array_y = np.array(series_df_y)\n \n #for loop to append to x_train y_train arrays according to window_in, window_out\n for idx in range(len(series_df_x) - window_in - window_out + 1): #'len(series_df_x) - window_in - window_out + 1' needs to be checked\n arrayx = array_x.copy()\n x = arrayx [idx:idx + window_in, : len(cols_x) - 1]\n #print(x)\n x[:,0:3] = x[:,0:3] / popu_size\n #print(x)\n arrayy = array_y.copy()\n y = arrayy[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1]\n y = y / popu_size\n train_seq.append((x, y)) #out col_x and col_y has last item 'Series number' so to remove that [, : len(cols_x)]\n #y_train.append(array_y[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1])\n #print(train_seq)\n\n #repeat for test sequence\n for series_no in test_series_seq:\n \n #new dataframe variable assignment for particular series drom df_x, df_y\n series_df_x = df_x[df_x['Series_No'] == series_no]\n series_df_y = df_x[df_y['Series_No'] == series_no]\n \n #converting into numpy arrays\n array_x = np.array(series_df_x)\n array_y = np.array(series_df_y)\n \n #for loop to append to x_train y_train arrays according to window_in, window_out\n for idx in range(len(series_df_x) - window_in - window_out + 1): #'len(series_df_x) - window_in - window_out + 1' needs to be checked\n arrayx = array_x.copy()\n x = arrayx[idx:idx + window_in, : len(cols_x) - 1]\n x[:,0:3] = x[:,0:3] / popu_size\n x_test.append(x)\n arrayy = array_y.copy()\n y = arrayy[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1]\n y = y / popu_size\n y_true.append(y)\n test_seq.append((x, y))\n \n \n #test_seq.append((array_x[idx:idx + window_in, : len(cols_x) - 1], array_y[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1])) #out col_x and col_y has last item 'Series number' so to remove that [, : len(cols_x)]\n #y_test.append(array_y[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1])\n\n \n win_len_per_ser = len_ser - window_in - window_out + 1\n \n return np.array(train_seq), np.array(test_seq), len_ser, win_len_per_ser, np.array(x_test), np.array(y_true)",
"def create_predictors(y): # pragma: no cover\n lags = y[-1:-4:-1]\n\n return lags",
"def predict(self, X: np.ndarray) -> np.ndarray:\n if not self._fitted:\n raise ValueError(\"Model is not fitted.\")\n\n X = (X / self.timestamp_interval).astype(int)\n shift = (X - self.y.size) // self.ts_period\n inds = X - self.ts_period * (1 + shift)\n\n if np.any(inds < 0):\n raise ValueError(\n \"Timestamps to predict can't be smaller than \"\n \"the last fitted timestamp.\"\n )\n\n return self.y[inds]",
"def prepare_data(df, target_col, window_len=10, zero_base=True, test_size=0.2):\n # train test split\n train_data, test_data = train_test_split(df, test_size=test_size)\n \n # extract window data\n X_train = extract_window_data(train_data, window_len, zero_base)\n X_test = extract_window_data(test_data, window_len, zero_base)\n \n # extract targets\n y_train = train_data[target_col][window_len:].values\n y_test = test_data[target_col][window_len:].values\n if zero_base:\n y_train = y_train / train_data[target_col][:-window_len].values - 1\n y_test = y_test / test_data[target_col][:-window_len].values - 1\n\n return train_data, test_data, X_train, X_test, y_train, y_test",
"def prepare_data_high(df, target_col, window_len=30, zero_base=True, test_size=0.2):\n # train test split\n train_data, test_data, train_target, test_target = train_test_split(df, test_size=test_size)\n \n # extract window data\n X_train = extract_window_data(train_data, window_len, zero_base)\n X_test = extract_window_data(test_data, window_len, zero_base)\n \n # extract targets\n #train_target, test_target = train_test_split(df.iloc[1:], test_size=test_size)\n y_train = train_target[target_col][window_len:].values\n y_test = test_target[target_col][window_len:].values\n if zero_base:\n y_train = y_train / train_target[target_col][:-window_len].values - 1\n y_test = y_test / test_target[target_col][:-window_len].values - 1\n\n return train_data, test_data, train_target, test_target, X_train, X_test, y_train, y_test",
"def predict(self, x):\n \n\n return predictions",
"def postprocess(self, t, y):\n # TODO not sure if it is the right way to return data\n return t, y[:, [0, -1]]",
"def predict(self, x, reset=True, verbose=False):\n g = []\n num_alarms = 0\n self.reset()\n alarms = []\n for w in tqdm(range(x.shape[0] // self.window_size), disable=not verbose):\n alarm_tmp, _ = self.iterate(\n datapoint=x[w * self.window_size:(w + 1) * self.window_size],\n reset=reset)\n if alarm_tmp:\n num_alarms += 1\n for i in range(self.window_size):\n alarms.append(alarm_tmp)\n g.append(self.g)\n\n y_predict = np.array(alarms)[:, None]\n cumulative_sums = np.array(g)\n if cumulative_sums.ndim == 1:\n cumulative_sums = cumulative_sums[:, None]\n\n return y_predict, cumulative_sums",
"def batch_generator(training_data, sequence_length=15, window_size = 15):\n engine_ids = list(training_data[\"engine_id\"].unique())\n temp = training_data.copy()\n for id_ in engine_ids:\n indexes = temp[temp[\"engine_id\"] == id_].index\n traj_data = temp.loc[indexes]\n cutoff_cycle = max(traj_data['cycle']) - sequence_length - window_size + 1\n \n if cutoff_cycle<0:\n drop_range = indexes\n print(\"sequence_length + window_size is too large\")\n else:\n cutoff_cycle_index = traj_data['cycle'][traj_data['cycle'] == cutoff_cycle+2].index\n drop_range = list(range(cutoff_cycle_index[0], indexes[-1] + 1))\n \n temp.drop(drop_range, inplace=True)\n indexes = list(temp.index)\n del temp\n \n feature_number = training_data.shape[1]-3\n\n x_shape = (len(indexes), sequence_length, window_size, feature_number)\n x_batch = np.zeros(shape=x_shape, dtype=np.float32)\n y_shape = (len(indexes))\n y_batch = np.zeros(shape=y_shape, dtype=np.float32)\n\n alt_index = indexes[0]\n for batch_index, index in enumerate(indexes):\n y_batch[batch_index] = training_data.iloc[index+window_size-2+sequence_length,-1]\n \n\n \n if index-alt_index==1 and batch_index!=0:\n temp_window = training_data.iloc[index+sequence_length-1:index+sequence_length-1 + window_size, 2:-1].values.reshape(1,window_size,-1)\n x_batch[batch_index] = np.concatenate((x_batch[batch_index-1][1:],temp_window))\n else:\n for seq in range(sequence_length):\n x_batch[batch_index][seq] = training_data.iloc[index+seq:index+seq + window_size, 2:-1].values\n alt_index = index\n\n \n return x_batch, y_batch",
"def sliding_window(frame_length, step, Xsampleslist, ysampleslist):\n Xsamples = []\n ysamples = []\n for j in range(len(Xsampleslist)):\n X = Xsampleslist[j]\n ybinary = ysampleslist[j]\n for i in range(0, X.shape[0] - frame_length, step):\n xsub = X[i:i + frame_length, :]\n ysub = ybinary\n Xsamples.append(xsub)\n ysamples.append(ysub)\n return Xsamples, ysamples",
"def predict_all_features(input_data=\"not defined\"):\r\n X, y = splitting.get_x_and_y()\r\n output_dataframe = pd.DataFrame\r\n y_pred_dataframe = pd.DataFrame\r\n for actual_y in y:\r\n X_train, X_test, y_train, y_test = splitting.splitting_data(y=actual_y)\r\n y_pred, predicted_units = linear_regresstion_action(X_train, X_test, y_train, y_test, input_data)\r\n # not sure if scores[actual_y.name] works as well or even scores[actual_y]...\r\n # one need to test if input data is final\r\n output_dataframe[f\"{actual_y.name}\"] = predicted_units\r\n y_pred_dataframe[f\"{actual_y.name}\"] = y_pred\r\n return y_pred_dataframe, output_dataframe",
"def predict(self, data_x):\n\n results = np.empty((data_x.shape[0], 1), dtype=self.__data[1].dtype)\n data = data_x.copy()\n\n data, _ = self.float_to_one_hot(data)\n\n for i, dp in enumerate(data):\n results[i, 0] = self.__predict(dp.reshape(1, -1))\n \n return results",
"def predict(self, X):",
"def predict(self, X):",
"def _one_step_ahead_prediction(self, X, y):\n lagged_data = self.build_matrix(X, y)\n\n if self.basis_function.__class__.__name__ == \"Polynomial\":\n X_base = self.basis_function.transform(\n lagged_data,\n self.max_lag,\n predefined_regressors=self.pivv[: len(self.final_model)],\n )\n else:\n X_base, _ = self.basis_function.transform(\n lagged_data,\n self.max_lag,\n predefined_regressors=self.pivv[: len(self.final_model)],\n )\n\n yhat = super()._one_step_ahead_prediction(X_base)\n return yhat.reshape(-1, 1)",
"def predict(self, X, pred_batch_size=None):",
"def get_preds_lin_reg(df, target_col, N, pred_min, offset):\n # Create linear regression object\n regr = LinearRegression(fit_intercept=True)\n\n pred_list = []\n\n for i in range(offset, len(df['daily'])):\n X_train = np.array(range(len(df['daily'][i-N:i]))) # e.g. [0 1 2 3 4]\n y_train = np.array(df['daily'][i-N:i]) # e.g. [2944 3088 3226 3335 3436]\n X_train = X_train.reshape(-1, 1) # e.g X_train = \n # [[0]\n # [1]\n # [2]\n # [3]\n # [4]]\n # X_train = np.c_[np.ones(N), X_train] # add a column\n y_train = y_train.reshape(-1, 1)\n # print X_train.shape\n # print y_train.shape\n # print 'X_train = \\n' + str(X_train)\n # print 'y_train = \\n' + str(y_train)\n regr.fit(X_train, y_train) # Train the model\n pred = regr.predict(np.array(N).reshape(1,-1))\n \n pred_list.append(pred[0][0]) # Predict the footfall using the model\n \n # If the values are < pred_min, set it to be pred_min\n pred_list = np.array(pred_list)\n pred_list[pred_list < pred_min] = pred_min\n \n return pred_list"
]
| [
"0.6774904",
"0.6560236",
"0.6472635",
"0.6309464",
"0.6181797",
"0.61627406",
"0.6098337",
"0.60654366",
"0.6061897",
"0.596563",
"0.5918512",
"0.5904378",
"0.5902015",
"0.5901196",
"0.58332914",
"0.5796301",
"0.5725761",
"0.5725353",
"0.57221377",
"0.5710071",
"0.56973004",
"0.5677867",
"0.56775707",
"0.5664571",
"0.564631",
"0.564272",
"0.564272",
"0.5635028",
"0.56224173",
"0.5619763"
]
| 0.69587827 | 0 |
Display individual listing details. Form to edit details if needed. | def view_and_edit_listing(request, listing_id):
categories = Category.objects.all()
listing = get_object_or_404(Listing, pk=listing_id)
if request.method == 'POST':
editform = AddListingForm(
request.POST,
request.FILES,
instance=listing)
if editform.is_valid():
listing.save()
messages.success(
request,
'Thank you. Your listing has been updated')
return redirect(reverse('addlisting'))
else:
editform = AddListingForm(instance=listing)
context = {
'editform': editform,
'listing': listing,
'categories': categories
}
return render(request, 'editlisting.html', context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_edit_form(self, obj_pk=None):\n obj = self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()",
"def show_pet_details(id):\n \n pet = Pet.query.get_or_404(id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.notes = form.notes.data\n pet.available = form.available.data\n pet.photo_url = form.photo_url.data\n db.session.commit()\n return redirect('/')\n\n else:\n return render_template('/pet_edit.html', form = form, pet = pet)",
"def getEditForm( self ):\n return \"listc_edit\"",
"def edit_details(specimen_id):\n specimen = Details.query.filter_by(specimen_id=specimen_id).first()\n form = CollectionDetailsForm(obj=specimen)\n\n if current_user.id == specimen.specimens.user_id:\n if form.validate_on_submit():\n specimen.date = form.date.data\n specimen.location = form.location.data\n specimen.county = form.county.data\n specimen.state = form.state.data\n specimen.habitat = form.habitat.data\n specimen.notes = form.notes.data\n\n db.session.commit()\n\n return redirect(f\"/specimen/{specimen_id}\")\n\n else:\n return render_template(\n \"editspecimen.html\",\n form=form,\n specimen=specimen,\n step=\"details\",\n )\n else:\n return (\"\", 403)",
"def edit_form():\n return template (\"edit\")",
"def show_user_detail_form():\n\n return render_template(\"add-user-details.html\")",
"def detail(request, shoppinglist_id):\n CheckItemFormSet = modelformset_factory(Item, extra=0, fields=('bought',))\n if request.method == 'POST':\n formset = CheckItemFormSet(request.POST)\n if formset.is_valid():\n for form in formset.forms:\n if form.cleaned_data['bought']:\n item = get_object_or_404(Item,\n pk=form.cleaned_data['id'].pk,\n shoppinglist__pantry__owner=request.user)\n if not item.bought:\n try:\n content = Content.objects.get(\n product=item.product,\n pantry=item.shoppinglist.pantry\n )\n content.amount += item.amount\n except ObjectDoesNotExist:\n content = Content(pantry=item.shoppinglist.pantry,\n product=item.product,\n amount=item.amount)\n content.save()\n item.delete()\n\n list = get_object_or_404(Shoppinglist,\n pantry__owner=request.user,\n pk=shoppinglist_id)\n formset = CheckItemFormSet(queryset=Item.objects.filter(shoppinglist=list))\n return render_to_response('shoppinglists/shoppinglist_detail.html',\n {'items': zip(list.item_set.all(), formset.forms),\n 'formset': formset,\n 'list': list,\n 'logged': True},\n context_instance=RequestContext(request))",
"def show_and_edit_pet_page(pet_id):\n \n pet = Pet.query.get(pet_id)\n\n form = EditPetPage(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n return redirect('/')\n\n else:\n return render_template('display_pet.html', pet=pet, form=form)",
"def show_book():\n book = request.form\n return render_template(\"book_details.html\",\n book = book)",
"def management_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n management_reference = get_object_or_404(Management, id=id,company=company)\n\n return render_to_response('management_form.html', \n {'details': management_reference,'info':management_reference},\n context_instance=RequestContext(request))",
"def listing_view(self, request):\n self._object = self.get_page_for_url(request)\n if self._object is not None:\n self.kwargs.update({'pk': self._object.pk})\n # pylint: disable=attribute-defined-outside-init\n self.action = 'detail_view'\n return self.detail_view(request, pk=self._object.pk)\n return super().listing_view(request)",
"def litnacional_detail_form(**kwargs):\n return LitnacionalDetailForm(**kwargs)",
"def event_details(id):\n details_form = EventDetailsForm()\n upload_image_form = UploadImageForm()\n remove_image_form = RemoveImageForm()\n details_form.submit.label.text = \"Submit\"\n event = Event.query.get_or_404(id)\n\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n\n if details_form.validate_on_submit():\n event.description = details_form.description.data\n event.pitch = details_form.pitch.data\n db.session.commit()\n flash(\"Update successful.\", \"success\")\n return redirect(url_for(\"events.event_details\", id=event.id))\n # pre-fill fields\n details_form.description.data = event.description\n details_form.pitch.data = event.pitch\n return render_template(\n \"events/event_details.html\",\n details_form=details_form,\n upload_image_form=upload_image_form,\n remove_image_form=remove_image_form,\n main_image_path=event.main_image(),\n event=event,\n )",
"def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)",
"def funding_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n return render_to_response('funding_form.html', \n {'details': funding_reference,'info':funding_reference},\n context_instance=RequestContext(request))",
"def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))",
"def post(self, request, slug):\n auth = request.user.is_authenticated\n user = None\n username = None\n if auth:\n user = request.user\n username = user.username\n listing = self.get_queryset().get(slug__iexact=slug)\n listing.guest.set([user])\n listing.save()\n return render(request, 'listings/detail.html', {'listing': listing,\n 'username': username,\n 'auth': auth})",
"def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:\n return redirect(return_url)\n\n model = self.get_one(id)\n\n if model is None:\n flash(gettext('Record does not exist.'), 'error')\n\n if self.details_modal and request.args.get('modal'):\n template = self.details_modal_template\n else:\n template = self.details_template\n\n relationship_views = []\n for relationship in self.model_relationship_views:\n relationship_view = self.model_relationship_views[relationship]\n bp = relationship_view.blueprint\n endpoint = '{}.ajax_config'.format(relationship_view.blueprint.name)\n data = {\n 'field': relationship,\n 'title': relationship_view.title,\n 'config_url': self.get_url(endpoint, model_id=id)\n }\n relationship_views.append(data)\n\n return self.render(\n template,\n model=model,\n details_columns=self._details_columns,\n get_value=self.get_detail_value,\n relationship_views=relationship_views,\n return_url=return_url\n )",
"def show_pet_with_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = PetFormEdit(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n \n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet.html', pet=pet, form=form)",
"def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)",
"def pet_display(pet_id):\n pet = Pets.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n db.session.commit()\n flash(\"Succesfully updated\")\n return redirect(f'/{pet_id}')\n else:\n return render_template('display.html', pet=pet, form=form)",
"def showEditContact(self):",
"def edit(self):\n\n pass",
"def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})",
"def edit_basic_info(id):\n form = CreateEventForm()\n form.submit.label.text = \"Update Event\"\n event = Event.query.get_or_404(id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n if form.validate_on_submit():\n services.update_models_from_create_event_form(form, event.venue, event)\n db.session.commit()\n flash(\"Your changes were saved.\", \"success\")\n return redirect(url_for(\"events.edit_basic_info\", id=id))\n services.populate_create_event_form(form, event.venue, event)\n return render_template(\"events/basic_info.html\", form=form, event=event)",
"def edit_form(pagename):\n\n articles = get_articles()\n\n edit_article = None\n for article in articles:\n if article[\"title\"] == pagename:\n edit_article = article\n\n if edit_article == None:\n return template(\"skapa-artikel\")\n\n else:\n return template(\"edit\", article=edit_article)",
"def show_form():\n\n story_title = request.args[\"madlib\"]\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n \n return render_template('form.html', s=story_for_form, story_title=story_title)",
"def listing_show(listing_id):\n\n listing = Listing.query.get_or_404(listing_id)\n return (jsonify(listing=listing.serialize(isDetailed=True)), 200)",
"def detail(request, company_id):\n company = get_object_or_404(Company, pk=company_id)\n\n company_form = CompanyForm(instance=company)\n contact_form = ContactCreationForm()\n\n return render(request, 'companies/detail.html', {\n 'company_detail': company,\n 'company_form': company_form,\n 'contact_form': contact_form\n })",
"def show_edit_pet(pet_id):\n pet = Pet.query.get(pet_id)\n form = EditPet(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template('edit_pet.html', form=form, pet=pet)"
]
| [
"0.6423212",
"0.641157",
"0.63378507",
"0.63075763",
"0.6235448",
"0.6208106",
"0.6087774",
"0.59973407",
"0.59855765",
"0.59694964",
"0.5962369",
"0.59481186",
"0.5931475",
"0.5924304",
"0.59183794",
"0.59069675",
"0.59024376",
"0.58667976",
"0.5819715",
"0.57534856",
"0.5748272",
"0.57161015",
"0.5709391",
"0.56826717",
"0.56753737",
"0.5667964",
"0.56500345",
"0.5648509",
"0.5631133",
"0.5630177"
]
| 0.7251244 | 0 |
Allow a user to delete their own listing from the database | def delete_listing(request, listing_id):
listing = get_object_or_404(Listing, pk=listing_id)
listing.delete()
messages.success(
request,
'Your listing has been removed from the database.')
return redirect(reverse('addlisting')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(request, shoppinglist_id):\n Shoppinglist.objects.filter(pk=shoppinglist_id,\n pantry__owner=request.user).delete()\n return redirect('blackem.users.views.home')",
"def delete_meal():",
"def delete(self):\n ...",
"def delete_single_list(current_user, id):\n\n try:\n int(id)\n except ValueError:\n return response('failed', 'Please provide a valid ShoppingList Id', 400)\n else:\n shoplist = ShoppingList.query.filter_by(user_id=current_user.id, id=id).first()\n if shoplist is not None:\n db.session.delete(shoplist)\n db.session.commit()\n return response('success', 'Shopping list has been deleted', 200)\n return response('failed', 'Shopping list not found', 404)\n\n\n\n # decorator used to allow cross origin requests",
"def post_delete(self, *args, **kw):\n #obtenemos el id de la fase para hacer el filtrado despues de la redireccion\n item_to_del = DBSession.query(Item).filter_by(id_item=args[0]).one()\n fid = item_to_del.id_fase_fk\n pks = self.provider.get_primary_fields(self.model)\n d = {}\n for i, arg in enumerate(args):\n d[pks[i]] = arg\n self.provider.delete(self.model, d)\n\n path = './' + '../' * (len(pks) - 1) + '?fid=' + str(fid)\n\n redirect(path)",
"def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n user = request.user\n success_url = reverse_lazy('muxic:user', kwargs={'username': user.username})\n self.object.delete()\n return HttpResponseRedirect(success_url)",
"def remove_ingredient(request, pk):\n\n url = reverse('fridge:fridge_detail')\n ingredient = get_object_or_404(FridgeIngredient, pk=pk)\n if request.user != ingredient.fridge.user:\n return HttpResponseRedirect(reverse('home'))\n ingredient.delete()\n\n return HttpResponseRedirect(url)",
"def delete(self, username, private_list_name):\n user = query_user_by_name(username)\n if user is None:\n return 'User does not exit', 404\n if invalid_user(username):\n return 'Unauthorized User', 401\n lst = query_private_list_by_id(username, private_list_name)\n if lst is None:\n return 'Private List does not exist', 404\n db.session.delete(lst)\n db.session.commit()\n return \"PrivateList has been deleted\", 200",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','bigfirms'):\n abort(403)",
"def delete_user():",
"def delete(self, _id):",
"def delete_item(self, id: str, user: User) -> bool:",
"def wants_delete(self, name: str):\n del self.wants[name]\n self.db.wants_delete(name)\n util.log(\"Deleted Wants List '{}'\".format(name), util.LogLevel.Info)\n self.push_status(\"Deleted Wants List '{}'\".format(name))",
"def delete(self, *args, **kwargs):\n pass",
"def delete(self, *args, **kwargs):\n pass",
"def delete():",
"def delete_item(request, shoppinglist_id, item_id):\n Item.objects.filter(pk=item_id,\n shoppinglist__pantry__owner=request.user).delete()\n return redirect('shoppinglists.views.detail', shoppinglist_id)",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()",
"def aboutToDelete(self):\n \n pass",
"def aboutToDelete(self):\n \n pass",
"def aboutToDelete(self):\n \n pass",
"def delete():\n id_num = int(input('Enter the ID number of the item you wish to delete\\n'))\n db_actions.remove(id_num)",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete_user():\n #TODO user delete\n pass",
"def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','specialties'):\n abort(403)",
"def delete(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n self.object = self.get_object()\n success_url = self.get_success_url()\n success_message = _(f'Successfully deleted todo list: {self.object}')\n\n self.object.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(success_url)"
]
| [
"0.7306309",
"0.7026673",
"0.67697275",
"0.6706425",
"0.6665472",
"0.66603535",
"0.663963",
"0.6630547",
"0.6598493",
"0.6590657",
"0.65645725",
"0.6553357",
"0.6548903",
"0.6521397",
"0.6521397",
"0.6516474",
"0.65144175",
"0.6508936",
"0.64967644",
"0.64729667",
"0.64729667",
"0.64729667",
"0.6453948",
"0.64439064",
"0.64439064",
"0.64439064",
"0.64439064",
"0.64352363",
"0.64316607",
"0.6418438"
]
| 0.70493543 | 1 |
Shows the progress bar for the given progress on stdout. | def bar(self, progress):
if not hasattr(self, "_limit") or not self._limit:
self._limit = self.terminal_size()
graph_progress = int(progress * self._limit)
self.stdout.write("\r", ending="")
progress_format = "[%-{}s] %d%%".format(self._limit)
self.stdout.write(
self.style.SUCCESS(
progress_format
% (self.progress_symbol * graph_progress, int(progress * 100))
),
ending="",
)
self.stdout.flush() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def progress_bar(progress):\n bar_length = 50\n block = int(round(bar_length * progress))\n text = 'Progress: [{0}] {1}'.format('#' * block + '-' * (bar_length - block),\n progress * 100)\n # Print progress after removing the previous progress\n sys.stdout.write('\\r' + text)\n sys.stdout.flush()",
"def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return",
"def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n print(backspace + '\\r', end='')\n print(progress, end='')",
"def _printProgress(self, progress):\n if not self._quiet:\n sys.stdout.write('\\rWriting store to CSV: [{0:50s}] {1:.2f}% '.format('#' * int(progress * 50.0), progress * 100.0))\n sys.stdout.flush()",
"def progress_bar(self, count, total, status):\n\n bar_len = 50\n filled_len = int(round(bar_len * count / float(total)))\n\n file_size_bytes = f\"{count:,}/{total:,} Bytes\"\n transfer_percent = round(100.0 * count / float(total), 2)\n file_bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n prefix = f\"[{self.LOGGER.host}:{self.LOGGER.port}]\"\n sys.stdout.write(f\"{prefix} -> |{file_bar}| {file_size_bytes} | {transfer_percent}% | {status}...\\r\")\n sys.stdout.flush()\n\n if count >= total: print()",
"def progress_bar(count, total, status=\"\"):\n\n bar_size = 20\n filled = int(round(bar_size * count / float(total)))\n percents = round(100.0 * count / float(total), 1)\n bar = u\"\\u25A0\" * filled + \" \" * (bar_size - filled)\n sys.stdout.write(\"Training [%s] %s%s %s\\r\" % \\\n (bar, percents, \"%\", status))\n sys.stdout.flush()",
"def progress_bar(bar_name: str, current_num: int, total_num: int, output_option: int = 2):\r\n if output_option == 2:\r\n print(\r\n '\\r[{:<50}] {}: {}/{}'.format(\r\n '=' * int(current_num / (2 * total_num) * 100), \r\n bar_name, current_num, total_num\r\n ), \r\n end=''\r\n )\r\n if current_num == total_num:\r\n print()",
"def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()",
"def progress(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n \n if count >= total: \n sys.stdout.write('[%s] %s%s ...%s%s\\r' % (bar, percents, '%', status, '\\n'))\n sys.stdout.flush()\n else:\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', status))\n sys.stdout.flush()",
"def _progressBar(self, percent, printEvery=10):\n floor = int(percent)\n sys.stdout.write('\\r' * (floor + 9))\n sys.stdout.write('[')\n sys.stdout.write('=' * (floor/printEvery))\n sys.stdout.write('>] {:02.2f}%'.format(percent))\n sys.stdout.flush()",
"def print_progress_bar(self, iter_num, start_time):\n iteration = iter_num + 1\n prefix = \"Progress: \"\n length = 50\n fill = '█'\n percent = (\"{0:.\" + str(1) + \"f}\").format(100 *\n (iteration / float(self.num_games)))\n exact_progress = \"{}/{}\".format(iteration, self.num_games)\n filled_length = int(length * iteration // self.num_games)\n total_time = int(time()-start_time)\n time_remaining = (time() - start_time)/(float(iter_num)+0.1)\n time_remaining = str(int(time_remaining*(self.num_games-iter_num)))\n bars = fill * filled_length + '-' * (length - filled_length)\n\n print('\\r%s |%s| (%s) %s%% | ETA: %ss (%ss)\\t' %\n (prefix, bars, exact_progress,\n percent, time_remaining,\n total_time), end='\\r')\n\n # Print New Line on Complete\n if iteration >= self.num_games:\n print(\"\\r\\n\\r\\n\")",
"def print_progress(iteration, total):\n iteration += 1\n prefix = 'Progress'\n suffix = 'Complete'\n length = 50\n fill = u\"\\u2588\"\n fill_alt = '#'\n\n percent = (\"{0:.1f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n bar_alt = fill_alt * filledLength + '-' * (length - filledLength)\n\n try:\n sys.stdout.write('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix))\n except:\n sys.stdout.write('\\r%s |%s| %s%% %s' % (prefix, bar_alt, percent, suffix))\n sys.stdout.flush()\n\n # Print New Line on Complete\n if iteration == total:\n print()",
"def print_progress(done,total):\n \n percent = 100.0*done/(total) \n bar = int(0.2*percent) \n \n sys.stdout.write('\\r')\n sys.stdout.write('[%-20s] %d%%' % ('='*bar, percent))\n sys.stdout.flush()\n \n return",
"def _printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '$'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n sys.stdout.write('\\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix))\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()",
"def printProgressBar(iteration, total, prefix='Progress: ', suffix='Complete',\n decimals=1, length=50, fill='█'):\n global start_time\n if iteration == 0:\n start_time = time.time()\n value = 100 * (iteration / float(total))\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(value)\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n elapsed_time = int(time.time() - start_time)\n m = str(elapsed_time // 60).zfill(2)\n s = str(elapsed_time % 60).zfill(2)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def start_progress(title):\n global progress_x\n sys.stdout.write(title + \": [\" + \"-\" * 40 + \"]\" + chr(8) * 41)\n sys.stdout.flush()\n progress_x = 0\n return 0",
"def printProgressBar (iteration, total, prefix = '\\tProgress', suffix = 'Complete', decimals = 2, length = 30, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def show_progress(show, current, max, text, *args):\n if show:\n progress = round((float(current) / max) * 100.0, 0)\n output = \"\\r\" + text.format(*args) + \" {0}% done. \".format(progress) \n sys.stdout.write(output)\n sys.stdout.flush()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def startprogress(title):\n global progress_x, title_global\n title_global = title\n sys.stdout.write(title + \": [\" + \"-\" * 40 + \"] 00% \")\n sys.stdout.flush()\n progress_x = 0",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")",
"def OnProgress(bytes_read, total_bytes, percent):\n sys.stdout.write(\"progress: %.2f%% \\r\" % (percent))\n sys.stdout.flush()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n if total == 0:\n \treturn\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(barLength * iteration // total)\n bar = fill * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def print_progress(self, info_dict):\n if self.n_print != 0:\n t = info_dict['t']\n if t == 1 or t % self.n_print == 0:\n string = 'Iteration {0}'.format(str(t).rjust(len(str(self.n_iter))))\n string += ' [{0}%]'.format(str(int(t / self.n_iter * 100)).rjust(3))\n print(string)",
"def printProgress(iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#'* filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (self,iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()"
]
| [
"0.82115006",
"0.7860901",
"0.7719026",
"0.76584613",
"0.7644995",
"0.7632446",
"0.76069385",
"0.7581132",
"0.75713515",
"0.7540913",
"0.75251687",
"0.7506127",
"0.75033945",
"0.74972796",
"0.7495927",
"0.7455444",
"0.74466825",
"0.7409827",
"0.7370332",
"0.7350555",
"0.73497766",
"0.73497766",
"0.73477244",
"0.73388577",
"0.73376113",
"0.73292047",
"0.732766",
"0.7326191",
"0.7317305",
"0.73169243"
]
| 0.8260696 | 0 |
Given a tkinter widget, this will create a FigureCanvas inside that widget and pack it | def create_figure(root_window):
    # Container frame that will hold the canvas and its toolbar
    frame = ttk.Frame(root_window)
    # Empty Matplotlib figure to draw on later
    fig = Figure()
    # Embed the figure in the frame via the Tk canvas backend
    canvas = FigureCanvas(fig, frame)
    # Standard pan/zoom/save toolbar attached to the canvas
    toolbar = NavigationToolbar2TkAgg(canvas, frame)
    # Let the frame and the canvas grow with the window
    frame.pack(expand=tk.YES, fill=tk.BOTH)
    canvas.get_tk_widget().pack(expand=tk.YES, fill=tk.BOTH)
    toolbar.update()
return fig | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createWidget(self):\n figure = Figure(figsize=(4,2), dpi=100)\n \"\"\"Figure size is measured in inches.\"\"\"\n graph = figure.add_subplot(111)\n \"\"\"The default subplot, which creates one row, one column, with index one.\"\"\"\n graph.plot(self.wave_table[0], self.wave_table[1])\n\n canvas = FigureCanvasTkAgg(figure, self.master)\n canvas.draw()\n canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)",
"def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')",
"def make_canvas(width, height, title):\n top = tkinter.Tk()\n top.minsize(width=width, height=height)\n top.title(title)\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\n # canvas.pack()\n canvas.place()\n return canvas",
"def __createCanvas(self):\r\n # create a canvas and pass a figure to it\r\n self.figure = plt.figure()\r\n self.canvas = FigureCanvas(self.figure)\r\n\r\n # create an axis\r\n self.canvas.axes = self.figure.add_subplot(1, 1, 1) # 1X1 grid, 1st subplot\r\n self.canvas.axes.set_title(\"Plot\")\r\n\r\n # create Navigation widget and pass a Canvas widget and the parent\r\n self.toolbar = NavigationToolbar(self.canvas, self)",
"def _create_canvas(self, parent):\n # matplotlib commands to create a canvas\n frame = QtGui.QWidget()\n mpl_canvas = FigureCanvas(self.value)\n mpl_canvas.setParent(frame)\n#\t\tmpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)\n\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(mpl_canvas)\n#\t\tvbox.addWidget(mpl_toolbar)\n frame.setLayout(vbox)\n return frame",
"def _create_canvas(self, parent):\n # matplotlib commands to create a canvas\n frame = QtGui.QWidget()\n mpl_canvas = FigureCanvas(self.value)\n mpl_canvas.setParent(frame)\n mpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(mpl_canvas)\n vbox.addWidget(mpl_toolbar)\n frame.setLayout(vbox)\n return frame",
"def setup_canvas(self):\n # create frame to contain canvas\n self.world_container = tk.Frame(self,\n width = self.world_size[1],\n height = self.world_size[0])\n self.world_container.grid(row = 1, column = 0, sticky = tk.W+tk.N)\n\n # create canvas\n self.canvas = tk.Canvas(\n self.world_container,\n width = self.world_size[1],\n height = self.world_size[0],\n borderwidth = 1,\n highlightthickness = 0)\n self.canvas.grid(row = 0, column = 0, sticky = tk.W)\n self.canvas.bind('<Button-1>', self.click_cell)",
"def __init__(self, parent_frame, plt_props=None):\n tk.Frame.__init__(self, master=parent_frame)\n if self.matplotlib_ready():\n \"\"\" the import statements are scoped so make new ones\"\"\"\n import matplotlib\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\n\n self.figure_bed = plt.figure(figsize=(7, 3.5))\n self.axis = self.figure_bed.add_subplot(111)\n\n if plt_props:\n for key, value in plt_props.iteritems():\n eval(\"plt.\" + key + \"(\" + value + \")\")\n # self.axis.set_axis_bgcolor('red')\n self.figure_bed.set_facecolor('white')\n self.canvas = FigureCanvasTkAgg(self.figure_bed, master=self)\n self.canvas._tkcanvas.config(highlightthickness=0)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side='top')\n\n # self.make_matplotlib_area(parent, plt_props)\n self.embed_matplotlib()\n self.type = 'matplotlib'\n # TODO ADD TO THIS\n else:\n graph = tk.Canvas(master=self)\n graph.pack(side='left', expand=True, fill=tk.BOTH)\n self.type = 'canvas'",
"def make_canvas(width, height, title):\r\n top = tkinter.Tk()\r\n top.minsize(width=width, height=height)\r\n top.title(title)\r\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\r\n canvas.pack()\r\n return canvas",
"def make_graph(self, frame, obname, **kwargs):\n \n #Generate the figure\n fig = self.make_fig(obname)\n \n #Identify the location to place the figure\n if 'gridpos' in kwargs:\n newrow = kwargs.pop('gridpos')\n else:\n newrow = frame.grid_size()[1] \n \n #Generate a frame specifically for the figure (this prevents resizing when the figure is updated)\n canvas_frame = tk.Frame(frame) #, width=self.screenwidth*0.13, height=self.screenheight*0.2778)\n canvas_frame.grid(column=0, row=newrow+1, columnspan=2)\n \n #Generate a canvas and place the figure in it\n canvas = FigureCanvasTkAgg(fig, master=canvas_frame) # A tk.DrawingArea.\n canvas.draw()\n canvas.get_tk_widget().grid(column=0, row=0)\n\n return canvas, fig",
"def make_canvas(width, height, title=None):\n top = tkinter.Tk()\n top.minsize(width=width, height=height)\n if title:\n top.title(title)\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\n canvas.pack()\n return canvas",
"def make_canvas(width, height, title=None):\n top = tkinter.Tk()\n top.minsize(width=width, height=height)\n if title:\n top.title(title)\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\n canvas.pack()\n return canvas",
"def make_canvas(self, painter, **args):\n\t\treturn None",
"def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)",
"def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)",
"def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)",
"def _create_canvas(self, parent):\n # The panel lets us add additional controls.\n panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)\n sizer = wx.BoxSizer(wx.VERTICAL)\n panel.SetSizer(sizer)\n # matplotlib commands to create a canvas\n mpl_control = FigureCanvas(panel, -1, self.value)\n sizer.Add(mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW)\n toolbar = NToolbar(mpl_control)\n sizer.Add(toolbar, 0, wx.EXPAND)\n self.value.canvas.SetMinSize((10,10))\n return panel",
"def __createWidgets(self):\n # Widget canvas, used to draw rubik's cube\n self.cv = Canvas(self.master)\n self.cv['bg'] = 'white' # Background color\n self.cv['height'] = '440' # Height of canvas\n self.cv['width'] = '560' # Width of canvas\n self.cv.place(x=0, y=0)\n self.__drawCube()",
"def make_canvas(width, height, title=None):\n objects = {}\n top = tkinter.Tk()\n top.minsize(width=width, height=height)\n if title:\n top.title(title)\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\n canvas.pack()\n\n return canvas",
"def _configure_canvas(event):\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())",
"def add_canvas(self, fig):\r\n self.canvas = FigureCanvas(fig)\r\n self.toolbar = NavigationToolbar(self.canvas,\r\n self, coordinates=True)\r\n self.canvas_vlayout.addWidget(self.toolbar)\r\n self.canvas_vlayout.addWidget(self.canvas)\r\n self.canvas.draw()",
"def set_canvas(self):\n self.ui.figure = plt.figure(figsize=(10, 10))\n self.ui.figure.patch.set_facecolor('None')\n self.ui.canvas = FigureCanvas(self.ui.figure)\n self.ui.canvas.setStyleSheet('background-color:transparent;')\n # Matplotlib toolbar\n self.ui.toolbar = NavigationToolbar(self.ui.canvas, self)\n self.ui.toolbar.setMaximumHeight(30)\n self.ui.figureLayout.addWidget(self.ui.toolbar)\n self.ui.figureLayout.addWidget(self.ui.canvas)\n self.ui.canvas.mpl_connect('button_press_event', self.onclick)\n self.ui.canvas.mpl_connect('pick_event', self.onclick_pick)",
"def prepare_canvas( self ):\n self.bottom_text = self.make_bottom_text()\n title = getattr( self, 'title', self.metadata.get('title','') )\n xlabel = getattr( self, 'xlabel', self.metadata.get('xlabel','') )\n ylabel = getattr( self, 'ylabel', self.metadata.get('ylabel','') )\n labels = getattr( self, 'labels', [] )\n colors = getattr( self, 'colors', [] )\n colors = list(colors); colors.reverse()\n x_formatter_cb = getattr( self, 'x_formatter_cb', lambda x: None )\n y_formatter_cb = getattr( self, 'y_formatter_cb', lambda x: None )\n legend = getattr( self, 'legend', self.metadata.get('legend', True) )\n bottom_text = getattr( self, 'bottom_text', None )\n kw = self.kw\n\n if type(legend) == types.StringType and legend.lower().find('f') > -1:\n legend = False\n elif type(legend) == types.StringType:\n legend = True\n\n prefs = self.prefs\n if 'svg' in kw.keys():\n svg = kw['svg']\n else:\n svg = False\n if svg:\n FigureCanvas = FigureCanvasSVG\n else:\n FigureCanvas = FigureCanvasAgg\n\n # Change the preferences based on passed metadata *and* kw keys.\n for key in prefs.keys():\n if key in self.metadata.keys():\n my_type = type( prefs[key] )\n # bool('false') is true! That's\n # why we have to do this override.\n if my_type == types.BooleanType:\n if str(self.metadata[key]).lower().find('f') >= 0:\n prefs[key] = False\n else:\n prefs[key] = True\n else:\n prefs[key] = my_type(self.metadata[key])\n if key in kw.keys():\n my_type = type( prefs[key] )\n # bool('false') is true! That's\n # why we have to do this override.\n if my_type == types.BooleanType:\n if str(self.kw[key]).lower().find('f') >= 0:\n prefs[key] = False\n else:\n prefs[key] = True\n else:\n prefs[key] = my_type(self.kw[key])\n\n self.prefs = prefs\n # Alter the number of label columns, if necessary. 
First,\n # calculate the max length of all the labels we are considering.\n max_length = 0\n for label in labels:\n max_length = max( len(label), max_length )\n\n # This is a hack to change the number of columns if the max_length\n # is very long.\n if max_length > 23:\n prefs['columns'] = min( 4, prefs['columns'] )\n if max_length > 30:\n prefs['columns'] = min( 3, prefs['columns'] )\n if max_length > 37:\n prefs['columns'] = min( 2, prefs['columns'] )\n\n # Figure size\n num_labels = len( labels )\n dpi = prefs['width'] / float(prefs['width_inches'])\n height_inches = prefs['height'] / dpi\n\n # Conversion from pixels to percentage of screen\n figure_padding_perc = prefs['figure_padding'] / float(prefs['height'])\n\n # Calculations for the legend\n rows = 0.0; column_height = 0.0; bottom = 0.0\n # Max number of rows in the legend\n rows = max(1,min( numpy.ceil(num_labels / float(prefs['columns'])), \\\n prefs['max_rows']) + 2*int(bottom_text != None))\n # Width and height for the legend, then converted into pixels.\n legend_width = 1 - 2 * prefs['legend_padding'] # In percent of screen.\n legend_height = (2*prefs['text_padding'] + prefs['text_size']) * \\\n rows/float(prefs['height']) # In percent of screen.\n leg_pix_height = legend_height * height_inches * dpi\n leg_pix_width = legend_width * prefs['width_inches'] * dpi\n self.leg_pix_width = leg_pix_width\n self.leg_pix_height = leg_pix_height\n column_width = 1.0 / float( prefs['columns'] )\n self.column_width = column_width\n\n if legend:\n column_height = (2 * prefs['text_padding'] + prefs['text_size']) / \\\n leg_pix_height\n bottom = 2 * prefs['legend_padding'] + legend_height\n\n box_width = prefs['text_size']\n self.box_width = box_width\n\n # Create our figure and canvas to work with\n fig = Figure()\n canvas = FigureCanvas( fig )\n\n # Set the figure properties we derived above.\n fig.set_size_inches( prefs['width_inches'], height_inches )\n fig.set_dpi( dpi )\n\n fig.set_facecolor('white')\n\n # rect = (left, bottom, width, height)\n legend_rect = prefs['legend_padding'], prefs['legend_padding'], \\\n legend_width, legend_height\n self.legend_rect = legend_rect\n if prefs['square_axis']:\n min_size = min( 1 - 1.5*figure_padding_perc, 1 - bottom - \\\n 2*figure_padding_perc )\n ax_rect = (.5 - min_size/2.0*prefs['height']/float(prefs['width']),\n figure_padding_perc + bottom,\n prefs['height']/float(prefs['width'])*min_size,\n min_size )\n else:\n ax_rect = (figure_padding_perc,\n figure_padding_perc + bottom,\n 1 - 1.5*figure_padding_perc,\n 1 - bottom - 2*figure_padding_perc)\n\n # Add a watermark:\n if 'watermark' in prefs.keys() and str(prefs['watermark']) != 'False':\n watermark_filename = os.path.expandvars( os.path.expanduser( \\\n prefs['watermark'] ) )\n if os.path.exists(watermark_filename):\n try:\n i = PILImage.open(watermark_filename)\n enh = PILImageEnhance.Contrast( i )\n i = enh.enhance( .033 )\n img_size = i.size\n resize = 1.0\n if prefs['width'] < img_size[0]:\n resize = prefs['width'] / float(img_size[0])\n if prefs['height'] < img_size[1]:\n resize = min(resize, prefs['height']/float(img_size[1]))\n box = (0.0, 0.0, img_size[0]/float(prefs['width'])*resize, \\\n img_size[1]/float(prefs['height'])*resize)\n #print box\n ax_wm = fig.add_axes( box )\n im = ax_wm.imshow( i, origin='lower', aspect='equal' )\n ax_wm.axis('off')\n ax_wm.set_frame_on( False )\n ax_wm.set_clip_on( False )\n except Exception, e:\n print e\n pass\n else:\n # Do nothing right now. 
Write a warning sometime?\n pass\n\n # Create our two axes, and set properties\n ax = fig.add_axes( ax_rect )\n\n # If requested, make x/y axis logarithmic\n if find_info('log_xaxis',kw,self.metadata,'False').find('r') >= 0:\n ax.semilogx()\n self.log_xaxis = True\n else:\n self.log_xaxis = False\n if find_info('log_yaxis',kw,self.metadata,'False').find('r') >= 0:\n ax.semilogy()\n self.log_yaxis = True\n else:\n self.log_yaxis = False\n\n setp( ax.get_xticklabels(), family=prefs['font_family'] )\n setp( ax.get_xticklabels(), fontname=prefs['font'] )\n setp( ax.get_xticklabels(), size=prefs['text_size'] )\n\n setp( ax.get_yticklabels(), family=prefs['font_family'] )\n setp( ax.get_yticklabels(), fontname=prefs['font'] )\n setp( ax.get_yticklabels(), size=prefs['text_size'] )\n\n setp( ax.get_xticklines(), markeredgewidth=2.0 )\n setp( ax.get_yticklines(), markeredgewidth=2.0 )\n setp( ax.get_xticklines(), zorder=4.0 )\n\n if legend:\n legend_ax = fig.add_axes( legend_rect )\n legend_ax.set_axis_off()\n\n ax.grid( True, color='#555555', linewidth=0.1 )\n\n # Set text on main axes.\n # Creates a subtitle, if necessary\n title = title.split('\\n',1)\n subtitle_height_pix = (prefs['subtitle_size'] + \\\n 2*prefs['text_padding']) * \\\n (len(title) > 1)\n ax_height_pix = ax_rect[-1] * height_inches * dpi\n ax.title = ax.text( 0.5, 1 + (subtitle_height_pix + \\\n prefs['text_padding'])/ \\\n ax_height_pix, title[0],\n verticalalignment='bottom', \\\n horizontalalignment='center' )\n ax.title.set_transform( ax.transAxes )\n ax.title.set_clip_box( None )\n ax._set_artist_props( ax.title )\n\n if len(title) > 1:\n ax.subtitle = ax.text( 0.5, 1.0 + prefs['text_padding']/\\\n ax_height_pix, title[1],\n verticalalignment='bottom',\n horizontalalignment='center' )\n ax.subtitle.set_family( prefs['font_family'] )\n ax.subtitle.set_fontname( prefs['font'] )\n ax.subtitle.set_size(prefs['subtitle_size'])\n ax.subtitle.set_transform( ax.transAxes )\n ax.subtitle.set_clip_box( None )\n\n ax.title.set_family( prefs['font_family'] )\n ax.title.set_fontname( prefs['font'] )\n ax.title.set_weight('bold')\n ax.title.set_size( prefs['title_size'] )\n\n # Set labels\n t = ax.set_xlabel( xlabel )\n t.set_family(prefs['font_family'])\n t.set_fontname(prefs['font'])\n t.set_size(prefs['text_size'])\n\n t = ax.set_ylabel( ylabel )\n t.set_family(prefs['font_family'])\n t.set_fontname(prefs['font'])\n t.set_size(prefs['text_size'])\n # Now, make the legend.\n offset = 0\n early_stop = False; labels = list(labels)\n labels.reverse()\n zipped = zip(labels,colors); #zipped.reverse()\n\n # Loop over the labels.\n for my_text, my_color in zipped:\n # Size calculations\n left = (box_width+3*prefs['text_padding'])/leg_pix_width + \\\n column_width*(offset % prefs['columns'])\n top = 1 - (column_height)*(numpy.floor( offset / prefs['columns'] ))\n next_bottom = 1 - (column_height)*(numpy.floor((offset+1)/prefs['columns']) + 2*int(bottom_text != None))\n\n # Stop early if we ran out of room.\n if next_bottom < 0 and (num_labels - offset > 1):\n early_stop = True\n break\n\n # Create text\n if legend:\n t = legend_ax.text( left, top, str(my_text), horizontalalignment='left',\n verticalalignment='top', size=prefs['text_size'])\n t.set_fontname( prefs['font'] )\n t.set_family( prefs['font_family'] )\n\n # Create legend rectangle:\n patch = Rectangle( ((column_width*(offset % prefs['columns']) + \\\n 1.2*prefs['text_padding']/leg_pix_width),\n top - box_width/leg_pix_height),\n 1.2*box_width/leg_pix_width, 
1.2*box_width/leg_pix_height )\n patch.set_ec('black')\n patch.set_linewidth(0.25)\n patch.set_fc( my_color )\n legend_ax.add_patch( patch )\n\n offset += 1\n\n # Set some additional text if we stopped early\n if early_stop == True:\n my_text = '... plus %i more' % (num_labels - offset)\n if legend: legend_ax.text( left, top, my_text, horizontalalignment='left',\n verticalalignment='top', size = prefs['text_size'] )\n\n top = 1 - column_height*( rows-1 )\n left = 0.5\n\n if bottom_text != None:\n if legend:\n t = legend_ax.text( left, top, str(bottom_text), horizontalalignment='center',\n verticalalignment='top', size=prefs['text_size'] )\n t.set_family( prefs['font_family'] )\n t.set_fontname( prefs['font'] )\n\n x_formatter_cb( ax )\n y_formatter_cb( ax )\n\n self.ax = ax\n self.canvas = canvas\n self.fig = fig",
"def draw_glycan_in_canvas(self, canvas, tree, root, names, h = 100., w = 100.):\n fig = mpl.figure.Figure(figsize=(h/self.dpi, w/self.dpi))\n ax = fig.add_subplot(111)\n \n self.myDrawer.draw_tree(tree, root, names, root_pos = [0, 0], direction = 1, ax = ax, axis = 0)\n ax.axis('equal')\n ax.axis('off')\n ax.set_ylim((-1, 6))\n ax.set_xlim((-3, 3))\n\n # Add to tk window\n figure_canvas_agg = FigureCanvasAgg(fig)\n figure_canvas_agg.draw()\n figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds\n figure_w, figure_h = int(figure_w), int(figure_h)\n glycan_image = tk.PhotoImage(master = canvas, width=figure_w, height=figure_h)\n canvas.create_image(figure_w/2, figure_h/2, image = glycan_image)\n tkagg.blit(glycan_image, figure_canvas_agg.get_renderer()._renderer, colormode=2)\n return glycan_image",
"def __init__(self, frame, width, height):\n \n self.canvas = Tkinter.Canvas(frame, width = int(width), \n height = int(height))\n self.canvas.pack(side = CANVAS[\"POSITION\"])\n self.canvas.configure(background = check_color(CANVAS[\"BACKGROUND_COLOR\"]))",
"def create_widgets(self):\n self.pack(fill=tk.BOTH, expand=True)\n self.top_frame = tk.Frame(self)\n self.top_frame.pack(fill=tk.X, expand=False)\n\n # Create obstacle button\n self.create_obstacle_button = tk.Button(\n self.top_frame,\n text=self.OBSTACLE_CREATION_INACTIVE_LABEL,\n command=self._toggle_creation_mode_cb\n )\n self.create_obstacle_button.pack(side=tk.LEFT)\n\n # Load button\n self.load_button = tk.Button(\n self.top_frame,\n text=self.LOAD_BUTTON_LABEL,\n command=self._load_button_cb\n )\n self.load_button.pack(side=tk.LEFT)\n\n # Export button\n export_button = tk.Button(\n self.top_frame,\n text=self.EXPORT_BUTTON_LABEL,\n command=self._export_button_cb\n )\n export_button.pack(side=tk.RIGHT)\n\n # Main canvas\n self.canvas = tk.Canvas(self, background='white')\n self.canvas.config(width=self.CANVAS_WIDTH, height=self.CANVAS_HEIGHT)\n self.canvas.bind('<ButtonRelease-1>', self._draw_line)\n self.canvas.pack(fill=tk.BOTH, expand=True)\n self.canvas.focus_set()",
"def init_plot(self, master):\n b = Figure(figsize=(8, 6), dpi=100)\n ac = b.add_subplot(111)\n ac.plot(10, 10)\n ac.set_title('Current tour plot')\n ac.set_xlabel('X axis coordinates')\n ac.set_ylabel('Y axis coordinates')\n ac.grid(True)\n canvas = FigureCanvasTkAgg(b, master)\n canvas.draw()\n canvas.get_tk_widget().grid(row=1, column=1, sticky=W)",
"def create_widget(self):\n self.widget = wxDockPane(self.parent_widget())",
"def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)",
"def appendCanvas(self, label, obj):\n obj.canvas.set_hadjustment(self.scroll.hAdjust[0])\n\n labelWidget = gtk.Label(label)\n labelWidget.set_alignment(0, 0)\n\n group = gtk.SizeGroup(gtk.SIZE_GROUP_VERTICAL)\n group.add_widget(labelWidget)\n group.add_widget(obj.canvas)\n\n self.leftHeading.pack_start(labelWidget, False, padding=self.vPadding)\n self.canvasStack.pack_start(obj.canvas, False, padding=self.vPadding)\n\n self.leftHeading.pack_start(gtk.HSeparator(), False)\n self.canvasStack.pack_start(gtk.HSeparator(), False)\n\n self.leftHeading.show_all()\n self.canvasStack.show_all()\n self.canvasList.append(obj)"
]
| [
"0.6434021",
"0.6354336",
"0.6290193",
"0.6223832",
"0.62126684",
"0.60837847",
"0.60644627",
"0.6044969",
"0.60278356",
"0.598321",
"0.59531015",
"0.59531015",
"0.5886708",
"0.5838247",
"0.5838247",
"0.5838247",
"0.5822294",
"0.58217853",
"0.57900804",
"0.57653874",
"0.57375574",
"0.56541646",
"0.5630705",
"0.5623565",
"0.5606233",
"0.559272",
"0.55264163",
"0.55176306",
"0.5503178",
"0.5471794"
]
| 0.6727419 | 0 |
Generator that yields pairs of features at aligned (non-gap) positions of two sequences. | def get_alignment(sequenceA, sequenceB, featuresA, featuresB):
    # Pick one optimal global alignment (pairwise2 only returns top-scoring alignments)
    alignment = max(pairwise2.align.globalxx(sequenceA, sequenceB), key=operator.itemgetter(1))
    alignedA, alignedB, _, __, ___ = alignment
    # Features are consumed in order, one per non-gap position of their sequence
    featuresA = iter(featuresA)
    featuresB = iter(featuresB)
    for x,y in zip(alignedA, alignedB):
        # Advance an iterator only when its sequence has a residue (no gap) here
        Fa = next(featuresA) if x != '-' else None
        Fb = next(featuresB) if y != '-' else None
        if Fa and Fb:
yield Fa, Fb | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_features(self) -> Generator[np.ndarray, None, None]:\n for text in self.texts:\n yield embed(text)",
"def __iter__(self):\n for feature in self.features:\n yield feature",
"def generator(features, labels, batch_size):\n \n # Create empty arrays to contain batch of features and labels#\n batch_features = np.zeros((batch_size, 160, 320, 3))\n batch_labels = np.zeros((batch_size, 1))\n while True:\n for i in range(batch_size):\n # choose random index in features\n index = random.choice(range(len(features)))\n batch_features[i] = features[index]\n batch_labels[i] = labels[index]\n yield batch_features, batch_labels",
"def iter_features(self):\n features = self.features\n if (features is not None):\n yield from features",
"def __iter__(self):\n for feature in itertools.izip(self.shapes, self.records):\n yield feature",
"def fasta_read_generator(file_handler):\r\n seq = []\r\n name = ''\r\n for line in file_handler:\r\n if line[0] == '>':\r\n sequence = ''.join(seq)\r\n if name: # only yield when we already have all data for the first sequence\r\n yield name, sequence\r\n name = line.rstrip()[1:] # omitting the leading >\r\n seq = []\r\n else:\r\n seq += [line]#.rstrip()] # keep line breaks\r\n sequence = ''.join(seq)\r\n yield name, sequence # don't forget the last sequence\r",
"def features(sequence, i):\n seq = sequence[i].split(\"\\t\")[1]\n\n # first position in the sentence\n if i == 0:\n yield \"first\"\n\n if i == len(sequence) - 1:\n yield \"last\"\n\n # word's length\n yield \"len=\" + get_word_len(seq)\n\n # first 4 letters\n yield \"first_four_letters=\" + seq[:4] if len(seq) > 4 else seq\n\n # last 3 letters\n yield \"last_three_letters=\" + seq[-3:] if len(seq) > 3 else seq\n\n # word shape\n yield \"word_shape=\" + str(get_word_shape(seq))\n yield \"short_word_shape=\" + get_short_word_shape(seq)\n yield \"digits_count=\" + str(digits_count(seq))\n\n # currency\n if currency_pattern.search(seq):\n yield \"currency\"\n\n if has_affixes(seq):\n yield \"starts_with_affixes\"\n\n # contains -'its'\n if 'its' in seq or re.search(r'\\w+(tel|nik)', seq, re.I):\n yield \"with_tel_its\"\n\n # contains letter + 'к' suffix\n if re.search(r'\\w+[bjlmnpstvz]k', seq, re.I):\n yield \"with_k_suffix\"\n\n # contains letter + 'в' suffix\n if re.search(r'\\w+(st|z|o)v', seq, re.I):\n yield \"with_v_suffix\"\n\n if re.search(r'\\w+[eio]k', seq, re.I):\n yield \"with_eiok_suffix\"\n\n if re.search(r'\\w+stn', seq, re.I):\n yield \"with_stn_suffix\"\n\n if re.search(r'\\w+[dk]r', seq, re.I):\n yield \"with_dr_suffix\"\n\n if re.search(r'\\w+(sh|jj)k', seq, re.I):\n yield \"with_shk_suffix\"\n\n if re.search(r'\\w+[ln]`k', seq, re.I):\n yield \"with_lnk_suffix\"\n\n if re.search(r'l[aeio]?$', seq, re.I):\n yield \"ends_with_l\"\n\n # contains 'нн'\n if 'nn' in seq:\n yield \"with_nn\"\n\n # contains 'чн', 'чк'\n if 'chk' in seq or 'chn' in seq or 'schn' in seq:\n yield \"with_chk\"\n\n # contains letter + 'н' suffix\n if re.search(r'\\w+[jlmrstvz]n', seq, re.I):\n yield \"with_n_suffix\"\n\n # contains suffixes 'ющ', 'ящ', 'ищ', 'вш'\n if re.search(r'\\w+((y[au]|i)s?ch|vsh)', seq, re.I) or seq.endswith('v'):\n yield \"with_part_sch_suffixes\"\n\n # ends with 'ся'\n if seq.endswith(\"sya\") or seq.endswith('s\\''):\n yield \"ends_with_sya\"\n\n if seq.endswith('j') and len(seq) > 1 and is_vowel(seq[-2]):\n yield \"ends_with_j\"\n\n if seq.endswith('t') and len(seq) > 1 and is_vowel(seq[-2]):\n yield \"ends_with_t\"\n\n if seq.endswith('\\''):\n yield \"ends_with_apo\"\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n # previous word's length\n yield \"prev_len=\" + str(get_word_len(prev))\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n # last letters of the previous word\n yield \"prev_last_letters=\" + (prev[-3:] if len(prev) > 3 else prev)\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n yield \"prev_short_word_shape=\" + get_short_word_shape(prev)\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n # next word's length\n yield \"next_len=\" + str(get_word_len(next))\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n # last letters of the next word\n yield \"next_last_letters=\" + (next[-3:] if len(next) > 3 else next)\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n yield \"next_short_word_shape=\" + get_short_word_shape(next)",
"def convert_examples_to_features(examples, max_seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n print(example.text_a)\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n input_mask = [1] * len(input_ids)\n\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n labels_ids = []\n for label in example.labels:\n labels_ids.append(int(label))\n \n if ex_index < 0:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids))\n return features",
"def iter_sequence(self):\n for res_name, fragment in self.sequence_fragment_list:\n yield res_name",
"def sequence_loader(\n data_path: str,\n index_path: typing.Union[str, None],\n context_description: typing.Union[\n typing.List[str], typing.Dict[str, str], None\n ] = None,\n features_description: typing.Union[\n typing.List[str], typing.Dict[str, str], None\n ] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[\n typing.Tuple[\n typing.Dict[str, np.ndarray], typing.Dict[str, typing.List[np.ndarray]]\n ]\n]:\n typename_mapping = {\n \"byte\": \"bytes_list\",\n \"float\": \"float_list\",\n \"int\": \"int64_list\"\n }\n\n record_iterator = tfrecord_iterator(\n data_path=data_path,\n index_path=index_path,\n shard=shard,\n compression_type=compression_type,\n )\n\n for record in record_iterator:\n example = example_pb2.SequenceExample()\n example.ParseFromString(record)\n\n context = extract_feature_dict(example.context, context_description, typename_mapping)\n features = extract_feature_dict(example.feature_lists, features_description, typename_mapping)\n\n yield context, features",
"def __iter__(self):\n\n if self.output_mode:\n process_atom = self._process_atom_output\n self.output_names = self.names[:]\n else:\n process_atom = self._process_atom\n\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\nProcessed Atoms:\")\n for clause in self.source:\n if isinstance(clause, Clause):\n if clause.head.functor == \"query\" and clause.head.arity == 1:\n continue\n extra_clauses = process_atom(clause.head, clause.body)\n for extra in extra_clauses:\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\t\" + str(extra))\n yield extra\n elif isinstance(clause, AnnotatedDisjunction):\n extra_clauses = process_atom(Or.from_list(clause.heads), clause.body)\n for extra in extra_clauses:\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\t\" + str(extra))\n yield extra\n else:\n if clause.functor == \"query\" and clause.arity == 1:\n continue\n # Fact\n extra_clauses = process_atom(clause, None)\n for extra in extra_clauses:\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\t\" + str(extra))\n yield extra\n\n if self.leakprob is not None:\n leakprob_atoms = self._get_leakprobatoms()\n for example_atom in leakprob_atoms:\n yield example_atom.with_probability(Constant(self.leakprob))",
"def convert_examples_to_features(self, examples, max_seq_length):\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = self.tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = self.tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[: (max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n labels_ids = []\n for label in example.labels:\n labels_ids.append(float(label))\n\n if ex_index < 0:\n self.logger.info(\"*** Example ***\")\n self.logger.info(\"guid: %s\" % (example.guid))\n self.logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n self.logger.info(\n \"input_ids: %s\" % \" \".join([str(x) for x in input_ids])\n )\n self.logger.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask])\n )\n self.logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids])\n )\n self.logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n if example.parent_labels is None:\n input_features = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids,\n )\n else:\n input_features = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids,\n parent_labels=example.parent_labels,\n )\n features.append(input_features)\n\n return features",
"def fasta_iterator(f, num_proteins, delim='|'):\n with open(f) as fd:\n lines = fd.readlines()\n line_idx = 0\n\n for i in range(num_proteins):\n line = lines[line_idx].strip()\n protein_id = line.split(delim)[1]\n seq = \"\"\n line_idx += 1\n\n while True:\n line = lines[line_idx].strip()\n if line[0] < 'A' or line[0] > 'Z':\n break\n seq += line\n line_idx += 1\n \n yield (protein_id, seq)",
"def predict_sample_generator(self, fi):\n for line in fi:\n sequence = np.array(line.split(\" \"), dtype=np.intp)\n yield sequence, sequence.shape[0], np.unique(sequence).shape[0]",
"def convert_examples_to_features(examples, max_seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[: (max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_ids = []\n\n for label in example.labels:\n label_ids.append(int(label))\n\n if ex_index < 0:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %s)\" % (example.labels, label_ids))\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids,\n )\n )\n return features",
"def convert_examples_to_features(examples, seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(InputFeatures(unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features",
"def sequences(self):\n # i am one\n yield self\n # nothing further\n return",
"def output(self, doc):\n contextfeaures = self.build_feature(doc)\n for f in contextfeaures:\n yield np.array(f).astype(np.float32)",
"def _generate_features(reader, paths, same_size=False,\n allow_missing_files=False):\n\n shape_determined = False\n for i, path in enumerate(paths):\n if allow_missing_files and not os.path.isfile(path):\n logger.debug(\"... File %s, that does not exist, has been ignored.\", path)\n continue\n\n feature = numpy.atleast_2d(reader(path))\n feature = numpy.ascontiguousarray(feature)\n if not shape_determined:\n shape_determined = True\n dtype = feature.dtype\n shape = list(feature.shape)\n yield (dtype, shape)\n else:\n # make sure all features have the same shape and dtype\n if same_size:\n assert shape == list(feature.shape)\n else:\n assert shape[1:] == list(feature.shape[1:])\n assert dtype == feature.dtype\n\n for value in feature.flat:\n yield value",
"def predict_batch_generator(self):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(self.config.parsed_predict_file)\n sample_gen = self.predict_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count in sample_gen:\n seq_lengths[i], unique_counts[i] = seq_length, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i]\n\n fi.close()",
"def features():\n return tuple(\n torch.randint(N_UNIQUE_FEATS, size=(seq_length,))\n for seq_length in SEQ_LENGTHS)",
"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing \n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n \n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features",
"def generate_genes(genbank):\n for (idx, feature) in enumerate(genbank.features):\n if feature.type == 'source' or feature.type == 'gene':\n continue\n row = {\n 'location_start': feature.location.start,\n 'location_end': feature.location.end,\n 'strand': feature.strand,\n 'ref': feature.ref,\n 'ref_db': feature.ref_db\n }\n for (name, val) in feature.qualifiers.items():\n # For some reason, all values under .qualifiers are lists of one elem\n # We join the elems into a string just in case there are ever multiple items\n row[name] = ', '.join(val)\n if not row.get('locus_tag'):\n # No locus tag; skip this one. We can only use features with locus tags.\n continue\n row['_key'] = row['locus_tag']\n # Generate the DNA sequence using biopython\n # https://biopython.org/DIST/docs/api/Bio.SeqFeature.SeqFeature-class.html#extract\n seq_obj = SeqFeature(feature.location, feature.type) # type: SeqFeature\n seq_str = str(seq_obj.extract(genbank.seq))\n row['dna_sequence'] = seq_str\n yield row",
"def get_data_generator(feature_file, label_file):\n with open(feature_file, \"r\") as csv1, open(label_file, \"r\") as csv2:\n reader1 = csv.reader(csv1)\n reader2 = csv.reader(csv2)\n # Skip the header row\n next(reader1)\n next(reader2)\n for row1, row2 in zip(reader1, reader2):\n array_row1 = np.array(row1, dtype=np.float)\n array_row2 = np.array(row2, dtype=np.int)\n yield array_row1, array_row2",
"def train_batch_generator(self):\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_count = np.zeros((self.batch_size), dtype=np.intp)\n fis = (self.config.train_dir + \"pos.txt\",\n self.config.train_dir + \"neg.txt\")\n fi_pos, fi_neg = map(open, fis)\n sample_gen_pos, sample_gen_neg = map(\n lambda fi: self.train_sample_generator(fi),\n (fi_pos, fi_neg)\n )\n self.load_embedding()\n\n while True:\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n labels = np.random.choice([0, 1], self.batch_size,\n p=self.config.class_probs)\n for i in range(self.batch_size):\n if labels[i] == 1:\n sequence, seq_lengths[i], unique_count[i] = next(sample_gen_pos)\n else:\n sequence, seq_lengths[i], unique_count[i] = next(sample_gen_neg)\n\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n yield input, seq_lengths, unique_count, labels\n\n map(lambda fi: fi.close(), (fi_pos, fi_neg))",
"def PhylipIterator(handle, alphabet = single_letter_alphabet) :\n line = handle.readline()\n if not line: return\n line = line.strip()\n parts = filter(None, line.split())\n if len(parts)<>2 :\n raise ValueError(\"First line should have two integers\")\n try :\n number_of_seqs = int(parts[0])\n length_of_seqs = int(parts[1])\n except ValueError:\n raise ValueError(\"First line should have two integers\")\n\n ids = []\n seqs = []\n\n #Expects STRICT truncation/padding to 10 characters\n #Does not require any white space between name and seq.\n for i in range(0,number_of_seqs) :\n line = handle.readline().rstrip()\n ids.append(line[:10].strip()) #first ten characters\n seqs.append([line[10:].strip().replace(\" \",\"\")])\n\n line=\"\"\n while True :\n #Skip any blank lines between blocks...\n while \"\"==line.strip():\n line = handle.readline()\n if not line : break #end of file\n if not line : break\n #print \"New block...\"\n for i in range(0,number_of_seqs) :\n seqs[i].append(line.strip().replace(\" \",\"\"))\n line = handle.readline()\n if (not line) and i+1 < number_of_seqs :\n raise ValueError(\"End of file mid-block\")\n if not line : break #end of file\n\n for i in range(0,number_of_seqs) :\n seq = \"\".join(seqs[i])\n if len(seq)<>length_of_seqs :\n raise ValueError(\"Sequence %i length %i, expected length %i\" \\\n % (i+1, len(seq), length_of_seqs))\n yield SeqRecord(Seq(seq, alphabet), id=ids[i], name=ids[i], description=\"\")",
"def __iter__(self):\n for tokens in readbook(self.path, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)",
"def _generateSequence(self, classifications, detections):\n det_len = len(detections)\n\n # Convert classifications and detections to input required for network\n seq_len = int(self.input_tensor.shape[1])\n fea_len = int(self.input_tensor.shape[2])\n input_data = np.zeros((seq_len,fea_len))\n\n # Add padding before and after sequence based on KEYFRAME_OFFSET\n input_data[:KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n input_data[det_len:det_len+KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n # Iterate through each frame of the data\n for idx, frame_detections in enumerate(detections):\n # We have already padded before and after\n seq_idx = idx + KEYFRAME_OFFSET\n\n # Skip through frames with no detections\n if len(frame_detections) == 0:\n input_data[seq_idx][0] = 1.0\n continue\n\n detection = frame_detections[0]\n classification = classifications[idx][0]\n\n # Do a size check on input\n # We expect either 1 or 2 models per sequence\n num_species = len(classification.species)\n num_cover = len(classification.cover)\n num_loc = len(detection.location)\n num_fea = num_species + num_cover + num_loc + 2\n num_of_models = int(fea_len / num_fea)\n\n if num_of_models != 2 and num_of_models != 1:\n raise Exception('Bad Feature Length')\n\n # Layout of the feature is:\n # Species, Cover, Normalized Location, Confidence, SSD Species\n # Optional duplicate\n\n for model_idx in range(num_of_models):\n # Calculate indices of vector based on model_idx\n fea_idx = model_idx * num_fea\n species_stop = fea_idx + num_species\n cover_stop = species_stop + num_cover\n loc_stop = cover_stop + num_loc\n ssd_conf = loc_stop\n ssd_species = ssd_conf + 1\n\n input_data[seq_idx,fea_idx:species_stop] = \\\n classification.species\n input_data[seq_idx,species_stop:cover_stop] = \\\n classification.cover\n input_data[seq_idx,cover_stop:loc_stop] = \\\n self._normalizeDetection(detection.location)\n input_data[seq_idx, ssd_conf] = detection.confidence\n input_data[seq_idx, ssd_species] = detection.species\n return input_data",
"def inner_generator():\n # A buffer where observed query-document features will be stored.\n # It is a list of dictionaries, one per query-document pair, where\n # each dictionary is a mapping from a feature ID to a feature value.\n for p in processed:\n yield p",
"def __iter__(self) -> Generator:\r\n yield from self.sequence"
]
| [
"0.58422786",
"0.57766163",
"0.5733101",
"0.572696",
"0.56986547",
"0.5611865",
"0.55697197",
"0.5549772",
"0.55406874",
"0.5532181",
"0.55207205",
"0.5516716",
"0.5501799",
"0.5498407",
"0.5498093",
"0.5491017",
"0.54699457",
"0.5461517",
"0.54557294",
"0.543198",
"0.5413791",
"0.54128647",
"0.54008687",
"0.53936327",
"0.5379307",
"0.5373486",
"0.5370216",
"0.5366719",
"0.5338934",
"0.53381073"
]
| 0.6744513 | 0 |
Joins feature_dict with the conservation features based on the sequence alignment. Features loaded are score, color, score confidence interval, color confidence interval and residue variety. | def join_conservation_data(sequence, features_dict, conservation_file):
    with open(conservation_file, 'r') as ifile:
        lines = [x.rstrip('\n') for x in ifile.readlines()]
        # Conservation records are tab separated; keep only complete rows of 14 fields
        lines = [x.split('\t') for x in lines]
        lines = [x for x in lines if len(x) == 14]
    features = features_dict.keys()
    # Field 1 of each record holds the residue letter; joining them rebuilds the conservation sequence
    cons_sequence = ''.join([line[1].lstrip() for line in lines])
    # Align the query sequence against the conservation sequence and walk matched positions
    for res_id, line in get_alignment(sequence, cons_sequence, features, lines):
        features_dict[res_id]["score"] = float(line[3])
        features_dict[res_id]["color"] = line[5]
        # Confidence intervals are stored as "low,high" pairs
        features_dict[res_id]["score_confidence_interval_low"] = float(line[6].split(",")[0])
        features_dict[res_id]["score_confidence_interval_high"] = float(line[6].split(",")[1])
        features_dict[res_id]["color_confidence_interval_low"] = float(line[9].split(",")[0])
        features_dict[res_id]["color_confidence_interval_high"] = float(line[9].split(",")[1])
        # Residue variety is a comma-separated list; store it space separated
        features_dict[res_id]["residue_variety"] = line[-1].replace(",", " ")
return features_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _series_merging_map(self, map_list, feature_option=\"sift\"):\n print(\" --- Start ---\")\n # Transform state into 3 specified values\n for i in range(len(map_list)):\n map_list[i] = cv2.cvtColor(map_list[i], cv2.COLOR_RGB2GRAY)\n map_list[i] = MF._transform_state(map_list[i])\n \n\n map_ref = map_list[0]\n for i in range(len(map_list)-1):\n map_align = map_list[i+1]\n\n \n if feature_option == \"orb\":\n orb = cv2.ORB_create()\n key_points_1, descriptor_1 = orb.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = orb.detectAndCompute(map_align, None)\n \n elif feature_option == \"surf\":\n surf = cv2.xfeatures2d.SURF_create(400)\n key_points_1, descriptor_1 = surf.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = surf.detectAndCompute(map_align, None)\n else:\n siftDetector = cv2.xfeatures2d.SIFT_create()\n key_points_1, descriptor_1 = siftDetector.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = siftDetector.detectAndCompute(map_align, None)\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descriptor_1, descriptor_2, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n \n pts_1, pts_2 = [], []\n for i in good:\n query_idx = i.queryIdx\n train_idx = i.trainIdx\n\n pts_1.append([\n key_points_1[query_idx].pt[0],\n key_points_1[query_idx].pt[1],\n ])\n pts_2.append([\n key_points_2[train_idx].pt[0],\n key_points_2[train_idx].pt[1],\n ])\n \n pts1 = np.array(pts_1)\n pts2 = np.array(pts_2)\n\n # relation, value, _ = RMM._ransac_find_rotation_translation(pts_set_1=pts2, pts_set_2=pts1, sigma=0.5, max_iter=5000)\n # print(\"- Inlier Percent: %f\"%value)\n # # Because the coordinates between the maps and the SIFT features are different:\n # # SIFT Features: Right: +x, Down: +y\n # # Maps: Down: +x, Right: +y\n # # Hence the dx and dy should be changed.\n # dx = relation[1]\n # dy = relation[0]\n # dyaw = relation[2]\n # print(\"- (x, y, t): (%f, %f, %f)\"%(dx,dy,dyaw))\n\n # # index, agr, dis = RMM._similarity_index(x=[dy, dx, dyaw], map1=map_ref, map2=map_align)\n # # print(\"Similarity Index: %f\\nAgree Number: %f\\nDisargee Number: %f\"%(index, agr, dis))\n # index, agr, dis, _ = RMM._similarity_index_2(x=[dx, dy, dyaw], map1=map_ref, map2=map_align)\n # print(\"- Similarity Index: %f\\n- Agree Number: %f\\n- Disargee Number: %f\"%(index, agr, dis))\n \n # map_merged = MF._merging_map(dx=dx, dy=dy, dtheta=dyaw, map1=map_ref, map2=map_align)\n # map_ref = map_merged.astype(np.uint8)\n # map_ref = MF._modify_map_size(merged_map=map_ref)\n\n relation, value, _ = RANSAC_Map_Merging()._ransac_find_all(pts_set_1=pts2, pts_set_2=pts1, sigma=5, max_iter=2000)\n dx = relation[1]\n dy = relation[0]\n dyaw = relation[2]\n dr = relation[3]\n print(\"- Inlier Percent: %f\"%value)\n print(\"- (dx, dy, dyaw, dr) = %f, %f, %f, %f\"%(dx,dy,dyaw, dr))\n map_merged = MAP_Function()._merging_map_ratio(dx=dx, dy=dy, dtheta=dyaw, dr=dr, map1=map_ref, map2=map_align)\n map_ref = map_merged.astype(np.uint8)\n map_ref = MF._modify_map_size(merged_map=map_ref)\n\n # return map_ref, (dx, dy, dyaw)\n return map_ref, (dx, dy, dyaw, dr)",
"def combine_features(c_dat):\n # They are keyed on transcript ID\n for tx in c_dat:\n for cds in c_dat[tx]:\n cds_pieces = c_dat[tx][cds]\n # If there fewer than 2 CDS chunks, then pull the tuple out of the\n # list.\n if len(cds_pieces) < 2:\n c_dat[tx][cds] = cds_pieces[0]\n else:\n # Join pieces\n locs = []\n ph = []\n for chunk in cds_pieces:\n c_loc = FeatureLocation(\n chunk[0].location.start,\n chunk[0].location.end,\n strand=chunk[0].strand)\n locs.append(c_loc)\n ph.append(chunk[2])\n # Sort them, according to strand. We assume that a CDS is not a\n # mixed-strand feature\n if cds_pieces[0][0].strand == 1:\n locs.sort(key=lambda x: x.start)\n else:\n locs.sort(key=lambda x: x.end, reverse=True)\n # Join them into a CompoundLocation\n full_loc = CompoundLocation(locs)\n # And then overwrite the input dictionary values\n full_feat = SeqFeature(full_loc, type='CDS',\n id=cds_pieces[0][0].id)\n full_feat.qualifiers['transl_tabl'] = [1]\n # Keep the phases!\n c_dat[tx][cds] = (full_feat, cds_pieces[0][1], ph)\n return c_dat",
"def _data_augmentation(feature_dict):\n image_features = feature_dict[_transformed_name(constants.IMAGE_KEY)]\n image_features = _image_augmentation(image_features)\n feature_dict[_transformed_name(constants.IMAGE_KEY)] = image_features\n return feature_dict",
"def add_feat_conf(self, conf_map):\n conf_map['generations'] = str(self.generations.text())\n conf_map['ga_metrics'] = str(self.metrics.text()).replace('\\n', '')\n conf_map['ga_breed_modes'] = str(self.breed_modes.text()).replace('\\n', '')\n conf_map['ga_cullings'] = str(self.removes.text()).replace('\\n', '')\n conf_map['ga_support_thresholds'] = str(self.ga_support_thresholds.text()).replace('\\n', '')\n conf_map['ga_support_sigmas'] = str(self.ga_support_sigmas.text()).replace('\\n', '')\n conf_map['ga_low_resolution_sigmas'] = str(self.lr_sigmas.text()).replace('\\n', '')\n conf_map['gen_pcdi_start'] = str(self.gen_pcdi_start.text())",
"def extract_features_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon, mode='training'):\n instances = []\n for sent in sentence_dicts:\n # print(sent)\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon):\n sent[key]['not-pred-cue'] = True\n continue\n\n features['token'] = value[3].lower()\n features['lemma'] = value[4].lower()\n features['pos'] = value[5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4].lower()\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4].lower()\n \n affix = get_affix_cue(value[3].lower(), affixal_cue_lexicon)\n if affix != None:\n base = value[3].lower().replace(affix, \"\")\n features['char-5gram1'], features['char-5gram2'] = get_character_ngrams(base, affix, 5)\n features['char-4gram1'], features['char-4gram2'] = get_character_ngrams(base, affix, 4)\n features['char-3gram1'], features['char-3gram2'] = get_character_ngrams(base, affix, 3)\n features['char-2gram1'], features['char-2gram2'] = get_character_ngrams(base, affix, 2)\n features['char-1gram1'], features['char-1gram2'] = get_character_ngrams(base, affix, 1)\n features['affix'] = affix\n else:\n features['char-5gram1'], features['char-5gram2'] = 'null','null'\n features['char-4gram1'], features['char-4gram2'] = 'null','null'\n features['char-3gram1'], features['char-3gram2'] = 'null','null'\n features['char-2gram1'], features['char-2gram2'] = 'null','null'\n features['char-1gram1'], features['char-1gram2'] = 'null','null'\n features['affix'] = 'null'\n \n instances.append(features)\n if mode == 'training':\n labels = extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon)\n return sentence_dicts, instances, labels\n return sentence_dicts, instances",
"def get_mapped_feature_name(self):\n\n # open a h5 file in case we need it\n f5 = h5py.File(self.train_database[0], 'r')\n mol_name = list(f5.keys())[0]\n mapped_data = f5.get(mol_name + '/mapped_features/')\n chain_tags = ['_chain1', '_chain2']\n\n # if we select all the features\n if self.select_feature == \"all\":\n\n # redefine dict\n self.select_feature = {}\n\n # loop over the feat types and add all the feat_names\n for feat_type, feat_names in mapped_data.items():\n self.select_feature[feat_type] = [\n name for name in feat_names]\n\n # if a selection was made\n else:\n\n # we loop over the input dict\n for feat_type, feat_names in self.select_feature.items():\n\n # if for a given type we need all the feature\n if feat_names == 'all':\n if feat_type in mapped_data:\n self.select_feature[feat_type] = list(\n mapped_data[feat_type].keys())\n else:\n self.print_possible_features()\n raise KeyError('Feature type %s not found')\n\n # if we have stored the individual\n # chainA chainB data we need to expand the feature list\n # however when we reload a pretrained model we already\n # come with _chainA, _chainB features.\n # So then we shouldn't add the tags\n else:\n # TODO to refactor this part\n if feat_type not in mapped_data:\n self.print_possible_features()\n raise KeyError('Feature type %s not found')\n\n self.select_feature[feat_type] = []\n\n # loop over all the specified feature names\n for name in feat_names:\n\n # check if there is not _chainA or _chainB in the name\n cond = [tag not in name for tag in chain_tags]\n\n # if there is no chain tag in the name\n if np.all(cond):\n\n # if we have a wild card e.g. PSSM_*\n # we check the matches and add them\n if '*' in name:\n match = name.split('*')[0]\n possible_names = list(\n mapped_data[feat_type].keys())\n match_names = [\n n for n in possible_names\n if n.startswith(match)]\n self.select_feature[feat_type] += match_names\n\n # if we don't have a wild card we append\n # <feature_name>_chainA and <feature_name>_chainB\n # to the list\n else:\n self.select_feature[feat_type] += [\n name + tag for tag in chain_tags]\n # if there is a chain tag in the name\n # (we probably relaod a pretrained model)\n # and we simply append the feaature name\n else:\n self.select_feature[feat_type].append(\n name)\n\n f5.close()",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def combine_features(df, lag_fea, lags, window_size, used_columns):\n lagged_fea = lagged_features(df[lag_fea], lags)\n moving_avg = moving_averages(df[lag_fea], 2, window_size)\n fea_all = pd.concat([df[used_columns], lagged_fea, moving_avg], axis=1)\n return fea_all",
"def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ",
"def merge_annotation(self, other_seg):\n try:\n assert isinstance(other_seg, SFFSegmentation)\n except AssertionError:\n print_date(_encode(u\"Invalid type for other_seg: {}\".format(type(other_seg)), u'utf-8'))\n sys.exit(65)\n # global data\n self.name = other_seg.name\n self.software = other_seg.software\n self.global_external_references = other_seg.global_external_references\n self.details = other_seg.details\n # loop through segments\n for segment in self.segments:\n other_segment = other_seg.segments.get_by_id(segment.id)\n segment.biological_annotation = other_segment.biological_annotation\n segment.complexes_and_macromolecules = other_segment.complexes_and_macromolecules",
"def _concatenate_features(features):\n pass",
"def annotateWithPredictions(featurefile, predictionfile, dest):\n features = pd.read_csv(featurefile, sep=\"\\t\")\n headercolumns = list(features.columns)\n features = features.values\n predictions = pd.read_csv(predictionfile, sep=\"\\t\").values\n\n predictmap = dict()\n for i in range(0, len(predictions)):\n key = predictions[i,0]+\":\"+str(predictions[i,1])+\"-\"+str(predictions[i,2])\n predictmap[key] = predictions[i,3]\n\n annvect = np.ones((len(features),1))*-1\n for i in range(0, len(features)):\n key = features[i,0]+\":\"+str(features[i,1])+\"-\"+str(features[i,2])\n try:\n annvect[i,0] = predictmap[key]\n except:\n pass\n\n headercolumns.append(\"Class Annotation\")\n afeatures = np.concatenate((features, annvect.astype(int)), axis=1)\n\n pd.DataFrame(afeatures,columns=headercolumns).to_csv(dest, sep=\"\\t\", index=None)",
"def _update_from_exons(self, feature):\n # note that start and end here are in direction of translation\n def start(loc):\n return loc[0][1]\n\n def end(loc):\n if loc[-1][2] == \"+\":\n return loc[-1][1] + loc[-1][3] + 1\n else:\n return loc[-1][1] - loc[-1][3] - 1\n\n if 'exon' in feature:\n # update the feature with the exon locations and sequences\n feature['location'] = [x['location'][0] for x in feature['exon']]\n feature['dna_sequence'] = \"\".join(\n x['dna_sequence'] for x in feature['exon'])\n feature['dna_sequence_length'] = len(feature['dna_sequence'])\n\n # construct feature location from utrs and cdss if present\n elif 'cds' in feature:\n cds = [copy.deepcopy(self.feature_dict[feature['cds']])]\n locs = [] # type: list\n seq = \"\"\n for frag in feature.get('five_prime_UTR', []) + cds + \\\n feature.get('three_prime_UTR', []):\n\n # merge into last location if adjacent\n if locs and abs(end(locs) - start(frag['location'])) == 1:\n # extend the location length by the length of the first\n # location in the fragment\n first = frag['location'].pop(0)\n locs[-1][3] += first[3]\n\n locs.extend(frag['location'])\n seq += frag['dna_sequence']\n\n feature['location'] = locs\n feature['dna_sequence'] = seq\n feature['dna_sequence_length'] = len(seq)\n\n # remove these properties as they are no longer needed\n for x in ['five_prime_UTR', 'three_prime_UTR', 'exon']:\n feature.pop(x, None)\n\n else:\n ValueError('Feature {feature[\"id\"]} must contain either exon or cds data to '\n 'construct an accurate location and sequence')",
"def add_feat_conf(self, conf_map):\n conf_map['resolution_trigger'] = str(self.res_triggers.text()).replace('\\n', '')\n conf_map['iter_res_sigma_range'] = str(self.sigma_range.text()).replace('\\n', '')\n conf_map['iter_res_det_range'] = str(self.det_range.text()).replace('\\n', '')",
"def add_feat_conf(self, conf_map):\n conf_map['pcdi_trigger'] = str(self.pcdi_triggers.text()).replace('\\n', '')\n conf_map['partial_coherence_type'] = '\"' + str(self.pcdi_type.text()) + '\"'\n conf_map['partial_coherence_iteration_num'] = str(self.pcdi_iter.text())\n conf_map['partial_coherence_normalize'] = str(self.pcdi_normalize.text())\n conf_map['partial_coherence_roi'] = str(self.pcdi_roi.text()).replace('\\n', '')",
"def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass",
"def feat_dict(pos_feat,text):\n dict = {}\n bigrams = ngrams(word_tokenize(text),2)\n trigrams = ngrams(word_tokenize(text),3)\n \n for feat in pos_feat:\n dict[feat]=features(feat,text,bigrams,[],[])\n return dict",
"def index_feats_dict(self):\n doc_features_dict = {}\n\n for index, doc in zip(self.index, self.series):\n # Sets for a doc and feature words\n doc_set = set(doc.split())\n feat_set = set(self.features)\n\n # Shared words between the two sets\n interset_words = doc_set.intersection(feat_set)\n\n # Append to doc_features_dict\n doc_features_dict[index] = list(interset_words)\n\n return doc_features_dict",
"def preprocess_data(self):\n # Fault and cavity models use same data and features. Get that now.\n signals = get_signal_names(cavities=['1', '2', '3', '4', '5', '6', '7', '8'],\n waveforms=['GMES', 'GASK', 'CRFP', 'DETA2'])\n\n # We need to crop, downsample, then do z-score. Any constant values are set to 0.001 manually.\n num_resample = 4096\n num_meta_columns = 8\n self.common_features_df = window_extractor(self.example, signals=signals, windows={'pre-fault': -1533.4},\n n_samples=7680, standardize=False, downsample=True,\n ds_kwargs={'num': num_resample})\n\n # The extractor makes a row per requested window plus some metadata. Columns are named\n # Sample_<sample_num>_<cav_num>_<signal>, and go Sample_1_1_GMES, Sample_2_1_GMES, ..., Sample_1_1_GASK, ....\n # We want to change this so that each column is all of the samples for 1_GMES, 1_GASK, ... as in the signal\n # order above.\n self.common_features_df = pd.DataFrame(\n self.common_features_df.iloc[0, num_meta_columns:].values.reshape(len(signals), -1).T, columns=signals)\n\n self.common_features_df = standard_scaling(self.common_features_df, fill=0.001)",
"def merge_all_claims_norm_dicts_for_docs(): \n# docs_norm_scores_dicts_path = base_path+\"\\\\docs_norm_scores_dicts\"\n docs_norm_scores_dicts_path = linux_base_path+\"/docs_norm_scores_dicts\"\n# all_claims_norms_scores_merged_dict = base_path +\"\\\\all_claims_norms_scores_merged_dict\"\n all_claims_norms_scores_merged_dict = linux_base_path +\"/all_claims_norms_scores_merged_dict\"\n for alpha in range(0,11,1):\n for beta in range(0,10,1):\n docs_scores_all_claims = {}\n for filename in os.listdir(docs_norm_scores_dicts_path):\n (alpha_f,beta_f)=turn_to_float([alpha,beta])\n if \"_alpha_\"+str(alpha_f)+\"_\" in filename and \"_beta_\"+str(beta_f)+\"_\" in filename:\n curr_dict = read_pickle(docs_norm_scores_dicts_path+\"/\"+filename)\n docs_scores_all_claims = dict(docs_scores_all_claims.items() + curr_dict.items()) #merge dicts\n save_pickle(all_claims_norms_scores_merged_dict+\"/docs_norm_scores_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f),docs_scores_all_claims)",
"def forward(self, aligned_feat):\n n, t, c, h, w = aligned_feat.size()\n # temporal attention\n embedding_ref = self.temporal_attn1(\n aligned_feat[:, self.center_frame_idx, :, :, :].clone())\n emb = self.temporal_attn2(aligned_feat.view(-1, c, h, w))\n emb = emb.view(n, t, -1, h, w) # (n, t, c, h, w)\n\n corr_l = [] # correlation list\n for i in range(t):\n emb_neighbor = emb[:, i, :, :, :]\n corr = torch.sum(emb_neighbor * embedding_ref, 1) # (n, h, w)\n corr_l.append(corr.unsqueeze(1)) # (n, 1, h, w)\n corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1)) # (n, t, h, w)\n corr_prob = corr_prob.unsqueeze(2).expand(n, t, c, h, w)\n corr_prob = corr_prob.contiguous().view(n, -1, h, w) # (n, t*c, h, w)\n aligned_feat = aligned_feat.view(n, -1, h, w) * corr_prob\n\n # fusion\n feat = self.feat_fusion(aligned_feat)\n\n # spatial attention\n attn = self.spatial_attn1(aligned_feat)\n attn_max = self.max_pool(attn)\n attn_avg = self.avg_pool(attn)\n attn = self.spatial_attn2(torch.cat([attn_max, attn_avg], dim=1))\n # pyramid levels\n attn_level = self.spatial_attn_l1(attn)\n attn_max = self.max_pool(attn_level)\n attn_avg = self.avg_pool(attn_level)\n attn_level = self.spatial_attn_l2(\n torch.cat([attn_max, attn_avg], dim=1))\n attn_level = self.spatial_attn_l3(attn_level)\n attn_level = self.upsample(attn_level)\n\n attn = self.spatial_attn3(attn) + attn_level\n attn = self.spatial_attn4(attn)\n attn = self.upsample(attn)\n attn = self.spatial_attn5(attn)\n attn_add = self.spatial_attn_add2(self.spatial_attn_add1(attn))\n attn = torch.sigmoid(attn)\n\n # after initialization, * 2 makes (attn * 2) to be close to 1.\n feat = feat * attn * 2 + attn_add\n return feat",
"def add_profile_from_lstrap(\n matrix_file, annotation_file, species_id, order_color_file=None\n ):\n annotation = {}\n\n with open(annotation_file, \"r\") as fin:\n # get rid of the header\n _ = fin.readline()\n\n for line in fin:\n parts = line.strip().split(\"\\t\")\n if len(parts) > 1:\n run, description = parts\n annotation[run] = description\n\n order, colors = [], []\n if order_color_file is not None:\n with open(order_color_file, \"r\") as fin:\n for line in fin:\n try:\n o, c = line.strip().split(\"\\t\")\n order.append(o)\n colors.append(c)\n except Exception as _:\n pass\n\n # build conversion table for sequences\n sequences = Sequence.query.filter_by(species_id=species_id).all()\n\n sequence_dict = {} # key = sequence name uppercase, value internal id\n for s in sequences:\n sequence_dict[s.name.upper()] = s.id\n\n with open(matrix_file) as fin:\n # read header\n _, *colnames = fin.readline().rstrip().split()\n\n colnames = [c.replace(\".htseq\", \"\") for c in colnames]\n\n # determine order after annotation is not defined\n if order is None:\n order = []\n\n for c in colnames:\n if c in annotation.keys():\n if annotation[c] not in order:\n order.append(annotation[c])\n\n order.sort()\n\n # read each line and build profile\n new_probes = []\n for line in fin:\n transcript, *values = line.rstrip().split()\n profile = defaultdict(list)\n\n for c, v in zip(colnames, values):\n if c in annotation.keys():\n condition = annotation[c]\n profile[condition].append(float(v))\n\n new_probe = {\n \"species_id\": species_id,\n \"probe\": transcript,\n \"sequence_id\": sequence_dict[transcript.upper()]\n if transcript.upper() in sequence_dict.keys()\n else None,\n \"profile\": json.dumps(\n {\"order\": order, \"colors\": colors, \"data\": profile}\n ),\n }\n\n new_probes.append(new_probe)\n\n if len(new_probes) > 400:\n db.engine.execute(ExpressionProfile.__table__.insert(), new_probes)\n new_probes = []\n\n db.engine.execute(ExpressionProfile.__table__.insert(), new_probes)",
"def transform_data(data_df, target_df = None):\n rec_idx, rec_col, rec_data = create_recency_feature(data_df)\n freq_idx, freq_col, freq_data = create_frequency_feature(data_df)\n norm_idx, norm_col, norm_data = create_norm_feature(data_df)\n\n # with hstack function we are concatinating a sparse matrix and a dense matirx :)\n feat_df = hstack((rec_data, freq_data, norm_data))\n print('Final feature matrix shape:', feat_df.shape)\n \n # merge all the feature names\n feat_names = list(rec_col) + list(freq_col) + list(norm_col)\n \n if isinstance(target_df, pd.core.frame.DataFrame):\n # get +ve & -ve indices\n one_idx = target_df[target_df['outcome_flag'] == 1]['id'].index.tolist()\n zero_idx = target_df[target_df['outcome_flag'] == 0]['id'].index.tolist()\n \n # calculate fitness values of features\n rcdf = create_fitness_stats(rec_data, rec_col, one_idx, zero_idx, nans = True)\n fqdf = create_fitness_stats(freq_data, freq_col, one_idx, zero_idx, nans = False)\n nrdf = create_fitness_stats(norm_data, norm_col, one_idx, zero_idx, nans=False)\n fit_df = rcdf.append(fqdf).append(nrdf)\n fit_df.reset_index(drop=1)\n return feat_df, feat_names, fit_df\n \n return feat_df, feat_names",
"def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))",
"def add_features(data_dict, features_list):\n\n for name in data_dict:\n # add features for the log values of the financial data\n for feat in features_financial:\n try:\n data_dict[name][feat + '_log'] = math.log(data_dict[name][feat] + 1)\n except:\n data_dict[name][feat + '_log'] = 'NaN'\n\n # Add ratio of POI messages to total.\n try:\n total_messages = data_dict[name]['from_messages'] + data_dict[name]['to_messages']\n poi_related_messages = data_dict[name][\"from_poi_to_this_person\"] +\\\n data_dict[name][\"from_this_person_to_poi\"] +\\\n data_dict[name][\"shared_receipt_with_poi\"]\n poi_ratio = 1.* poi_related_messages / total_messages\n data_dict[name]['poi_ratio_messages'] = poi_ratio\n except:\n data_dict[name]['poi_ratio_messages'] = 'NaN'\n\n return data_dict",
"def present_map(cmdb_ci_types, db_ci_types, cmdb_rel_types, db_rel_types, cmdb_ci_attributes, db_ci_attributes, cmdb_rel_attributes, db_rel_attributes, similar_ci, similar_rel, similar_attr_ci, similar_attr_rel):\n print(\"\\n===============================================================================================================================================================================\")\n print(blue + \"CONFIGURATION ITEMS MAPPING\" + reset)\n print(\"===============================================================================================================================================================================\")\n print()\n data = []\n for db_ci in similar_ci:\n cmdb_ci = list(similar_ci[db_ci].keys())[0]\n sim = similar_ci.get(db_ci).get(cmdb_ci)\n row = [cmdb_ci, cmdb_ci_types.get(\n cmdb_ci), db_ci, db_ci_types.get(db_ci), sim]\n data.append(row)\n print(tabulate(data, headers=[\n \"CI in CMDB\", \"Description\", \"CI in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n\n for db_ci in similar_ci:\n data = []\n cmdb_ci = list(similar_ci[db_ci].keys())[0]\n print(\"**************************************************************************************************\")\n print(\n green + str(cmdb_ci) + \" Attributes Mapping\" + reset)\n print(\"**************************************************************************************************\")\n print()\n atrs = similar_attr_ci.get(cmdb_ci)\n if atrs != None:\n for cmdb_at in atrs:\n db_at = list(atrs.get(cmdb_at).keys())[0]\n sim = atrs.get(cmdb_at).get(db_at)\n row = [cmdb_at, cmdb_ci_attributes.get(\n cmdb_ci).get(cmdb_at), db_at, db_ci_attributes.get(db_ci).get(db_at), sim]\n data.append(row)\n print(tabulate(data, headers=[\"Attribute in CMDB\", \"Description\",\n \"Attribute in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n print()\n\n print(\"===============================================================================================================================================================================\")\n print(blue + \"RELATIONSHIPS MAPPING\" + reset)\n print(\"===============================================================================================================================================================================\")\n print()\n\n data = []\n for db_rel in similar_rel:\n cmdb_rel = list(similar_rel[db_rel].keys())[0]\n sim = similar_rel.get(db_rel).get(cmdb_rel)\n row = [cmdb_rel, cmdb_rel_types.get(\n cmdb_rel), db_rel, db_rel_types.get(db_rel), sim]\n data.append(row)\n atrs = similar_attr_rel.get(cmdb_rel)\n print(tabulate(data, headers=[\n \"Relationship in CMDB\", \"Description\", \"Relationship in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n\n for db_rel in similar_rel:\n data = []\n cmdb_rel = list(similar_rel[db_rel].keys())[0]\n print(\"**************************************************************************************************\")\n print(green + str(cmdb_rel) + \" Attributes Mapping\" + reset)\n print(\"**************************************************************************************************\")\n print()\n for cmdb_at in atrs:\n db_at = list(atrs.get(cmdb_at).keys())[0]\n sim = atrs.get(cmdb_at).get(db_at)\n cmdb_at_desc = cmdb_rel_attributes.get(cmdb_rel)\n if cmdb_at_desc != None:\n cmdb_at_desc = cmdb_at_desc.get(cmdb_at)\n db_at_desc = db_rel_attributes.get(db_rel)\n if db_at_desc != None:\n db_at_desc = db_at_desc.get(db_at)\n row = [cmdb_at, cmdb_at_desc, db_at,\n db_at_desc, sim]\n 
data.append(row)\n print(tabulate(data, headers=[\"Attribute in CMDB\", \"Description\",\n \"Attribute in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()",
"def prepare_features(features, subject_labels):\n data = {}\n labels = {}\n for stage in STAGES:\n labels[stage] = []\n features_combined = []\n\n for subject in subject_labels.keys():\n current = []\n for feature, columns in features:\n if feature[stage][subject].size == 0:\n # do not look at empty arrays\n continue\n # collect features for current stage and subject\n if len(feature[stage][subject].shape) == 2:\n # feature is 2-dimensional, just use transpose\n current.append(feature[stage][subject].T)\n elif len(feature[stage][subject].shape) == 3:\n # feature is 3-dimensional, manually reshape to 2-dimensional\n # np.reshape does not work here\n reshaped = []\n for electrode in range(feature[stage][subject].shape[0]):\n for band in range(feature[stage][subject].shape[2]):\n if len(feature[stage][subject].shape) != 3:\n continue\n reshaped.append(feature[stage][subject][electrode, :, band])\n current.append(np.array(reshaped).T)\n\n if len(current) == 0:\n continue\n\n # merge the features for the current stage and subject\n features_combined.append(np.concatenate(current, axis=1))\n\n # concatenate the label name for the current subject as often as there are samples\n labels[stage] += [subject_labels[subject]] * features_combined[-1].shape[0]\n\n # concatenate the features for all subjects\n data[stage] = np.concatenate(features_combined, axis=0)\n labels[stage] = np.array(labels[stage])\n\n return data, labels",
"def featurize_1(list_of_demonstrations, kinematics, sr):\n\tprint \"FEATURIZATION 1\"\n\n\tdata_X_1 = {}\n\tdata_X_2 = {}\n\tfor demonstration in list_of_demonstrations:\n\t\tprint \"SIFT for \", demonstration\n\t\tstart, end = parser.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER\n\t\t\t\t\t\t+ demonstration + \"_\" + constants.CAMERA +\".p\")\n\n\t\tW = kinematics[demonstration]\n\t\tW_sampled = utils.sample_matrix(W, sampling_rate = sr)\n\n\n\t\tPATH_TO_SIFT = constants.PATH_TO_DATA + \"sift_FCED/SIFT_\"+ demonstration\n\t\tZ = pickle.load(open(PATH_TO_SIFT + \"_1.p\", \"rb\"))\n\t\tZ = Z[start:end + 1]\n\t\tZ_sampled_1 = utils.sample_matrix(Z, sampling_rate = sr)\n\n\t\tZ = pickle.load(open(PATH_TO_SIFT + \"_2.p\", \"rb\"))\n\t\tZ = Z[start:end + 1]\n\t\tZ_sampled_2 = utils.sample_matrix(Z, sampling_rate = sr)\n\n\t\tassert Z_sampled_1.shape[0] == W_sampled.shape[0]\n\t\tassert Z_sampled_2.shape[0] == W_sampled.shape[0]\n\n\t\tdata_X_1[demonstration] = np.concatenate((W_sampled, Z_sampled_1), axis = 1)\n\t\tdata_X_2[demonstration] = np.concatenate((W_sampled, Z_sampled_2), axis = 1)\n\n\tpickle.dump(data_X_1, open(PATH_TO_FEATURES + \"SIFT_1.p\", \"wb\"))\n\tpickle.dump(data_X_2, open(PATH_TO_FEATURES + \"SIFT_2.p\", \"wb\"))",
"def _build_augmentation_map(self, images):\n aug_map = {}\n img_shape = (images[0].x.shape[0], images[0].x.shape[1])\n\n vert_modes = [Crop.TOP, Crop.CENTER, Crop.BOTTOM]\n horiz_modes = [Crop.LEFT, Crop.CENTER, Crop.RIGHT]\n crop_modes = flatten_list([[CropMode(vert, horiz) for horiz in horiz_modes] for vert in vert_modes])\n\n labels_series = pd.Series([i.y for i in images])\n labels_distribution = labels_series.value_counts(normalize=True).sort_values()\n\n min_distribution = labels_distribution.iloc[0] * len(crop_modes)\n\n for label, distribution in labels_distribution.iteritems():\n aug_num = math.ceil(min_distribution / distribution)\n #additional augmentation functions can be added here:\n aug_map[label] = [self._build_crop_fn(img_shape, crop_modes[:aug_num])]\n \n return aug_map",
"def sample(self, features, states=None): # for predicton\n sampled_ids = []\n prob_ids = []\n k_samples = []\n k_probs = []\n sampling_num = 30\n prob_thresh = 0.1\n histogram_clothing = np.zeros(15, dtype=int)\n inputs = features.unsqueeze(1)\n\n for i in range(2):\n hiddens, states = self.lstm(inputs, states) \n outputs = self.linear(hiddens.squeeze(1)) \n if i == 0 :\n prob_pred, predicted = outputs.max(1) \n inputs = self.embed(predicted) \n inputs = inputs.unsqueeze(1)\n # states_m = states\n else :\n top_k_prob, top_k = outputs.topk(sampling_num)\n #top_k = top_k.squeeze(0) \n \n for i in range(sampling_num):\n inputs = self.embed(top_k[:,i]) \n inputs = inputs.unsqueeze(1) \n word_prob = top_k_prob[:,i]\n if word_prob < prob_thresh:\n break\n sampled_ids.append(top_k[:,i])\n # print(self.vocab.idx2word[top_k[:,i].cpu().numpy()[0]])\n prob_ids.append(word_prob)\n _states = states # re-load\n duplicate_tag = False\n\n for j in range(self.max_seg_length):\n _hiddens, _states = self.lstm(inputs, _states) \n outputs = self.linear(_hiddens.squeeze(1)) \n prob_pred, predicted = outputs.max(1) \n \n word = self.vocab.idx2word[predicted.cpu().numpy()[0]]\n if word == '<end>':\n break\n\n class_index = self.clothing_class.get(word, '')\n if class_index is not '':\n if histogram_clothing[class_index] > 0:\n duplicate_tag = True\n break\n else:\n if word == 'jacket' or word == 'coat' or word == 'jumper':\n class_index = self.clothing_class.get('jacket')\n histogram_clothing[class_index] += 1\n class_index = self.clothing_class.get('coat')\n histogram_clothing[class_index] += 1\n class_index = self.clothing_class.get('jumper')\n histogram_clothing[class_index] += 1\n else:\n histogram_clothing[class_index] += 1\n\n sampled_ids.append(predicted)\n prob_ids.append(prob_pred)\n inputs = self.embed(predicted) \n inputs = inputs.unsqueeze(1) \n if duplicate_tag :\n duplicate_tag = False\n sampled_ids = [] \n prob_ids = []\n continue\n sampled_ids = torch.stack(sampled_ids, 1) \n prob_ids = torch.stack(prob_ids, 1) \n k_samples.append(sampled_ids) \n k_probs.append(prob_ids)\n sampled_ids = [] \n prob_ids = []\n\n return k_samples, k_probs"
]
| [
"0.5636555",
"0.5480253",
"0.5412035",
"0.5236842",
"0.5235385",
"0.52171755",
"0.51624644",
"0.5105716",
"0.5102434",
"0.50964284",
"0.5079223",
"0.50492746",
"0.50246644",
"0.50122684",
"0.5011619",
"0.50078404",
"0.50054425",
"0.4992147",
"0.49865118",
"0.49690413",
"0.49674088",
"0.4959926",
"0.49576372",
"0.4955293",
"0.49544904",
"0.49539313",
"0.49050832",
"0.4898684",
"0.48688194",
"0.48581815"
]
| 0.7717556 | 0 |
Create two Species and check that Ecosystem is correctly updated. | def test_speciesCreation():
sys = LVsystem.Ecosystem()
sys.addSpecies('rabbit')
sys.addSpecies('fox')
sys.setInteraction('rabbit', 'fox', -1)
sys.setInteraction('fox', 'rabbit', 1)
sys.setInitialCond('rabbit', 10)
sys.setInitialCond('fox', 5)
sys.setGrowthRate('rabbit', 1)
sys.setGrowthRate('fox', -1)
sys.setCarrCap('rabbit', 10000)
sys.setCarrCap('fox', 10000)
sys.setChangeRate('rabbit', 10)
sys.setChangeRate('fox', 20)
assert len(sys.species_list) == 2
assert sys.species_list == ['rabbit','fox']
assert sys.intMatrix == {('rabbit','fox'):-1, ('fox','rabbit'):1}
sys.removeSpecies('rabbit')
sys.removeSpecies('fox') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testMakeNewSpecies(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]')]\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs)) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]'),\n Species().fromSMILES('CC')]#duplicate species\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs) - 1)",
"def test_make_new_species(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]')]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs))\n self.assertEquals(len(cerm.index_species_dict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('CC')] # duplicate species\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs) - 1)\n self.assertEquals(len(cerm.index_species_dict), len(spcs) - 1)",
"def test_patch_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume_1 = synthetic_volume_full(host)\n spare_volume_2 = synthetic_volume_full(host)\n\n response = self.api_client.patch(\n \"/api/target/\",\n data={\n \"objects\": [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_1.id},\n {\"kind\": \"MDT\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_2.id},\n ],\n \"deletions\": [],\n },\n )\n self.assertHttpAccepted(response)",
"def test_create_services_with_components(self):\n\n component1 = sample_componenets(user=self.user, name='switch')\n component2 = sample_componenets(user=self.user, name='switchboard')\n\n payload = {\n 'title' : 'Fitting Job',\n 'components' : [component1.id, component2.id],\n 'price' : 100.00\n }\n\n res =self.client.post(SERVICES_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n services = Service.objects.get(id=res.data['id'])\n components = Service.components.all()\n self.assertEqual(components.count(), 2)\n self.assertIn(component1, components)\n self.assertIn(component2, components)",
"def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))",
"def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))",
"def test_createData():\n\n sys = LVsystem.Ecosystem()\n\n sys.addSpecies('rabbit')\n sys.setInteraction('rabbit', 'hen', 0)\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInitialCond('rabbit', 30)\n sys.setGrowthRate('rabbit', 0.09)\n sys.setCarrCap('rabbit', 10000)\n sys.setChangeRate('rabbit', 400)\n\n sys.addSpecies('hen')\n sys.setInteraction('hen', 'rabbit', 0)\n sys.setInteraction('hen', 'fox', -1)\n sys.setInitialCond('hen', 10)\n sys.setGrowthRate('hen', 0.07)\n sys.setCarrCap('hen', 10000)\n sys.setChangeRate('hen', 500)\n\n sys.addSpecies('fox')\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInteraction('fox', 'hen', 1)\n sys.setInitialCond('fox', 20)\n sys.setGrowthRate('fox', -0.06)\n sys.setCarrCap('fox', 1)\n sys.setChangeRate('fox', 250)\n\n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n \n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')",
"def test_create_system_entire(self):\n pass",
"def test_topo_add_herbviore():\n instance = topo.Topography()\n instance.add_animal(animals.Herbivores())\n assert len(instance.herbivore_list) == 1",
"def test_species(self):\n spec = input.species('C2H4', os.path.join(self.directory, 'species', 'C2H4', 'ethene.py'))\n self.assertTrue(isinstance(spec, Species))\n self.assertEqual(len(spec.molecule), 0)",
"def test_put_dicot_name_creates_plant(self):\n request_data = {\n \"common_name\": \"mahogany\",\n \"subclass\": \"Rosidae\",\n \"order\": \"Sapindales\",\n \"family\": \"Meliaceae\",\n \"genus\": \"Swietenia\",\n \"species\": \"Sweitenia mahagoni\"\n }\n response = self.client.put('/dicots/mahogany', request_data, format='json')\n created_plant = Plant.objects.latest('id')\n expected_data = {\n \"id\": \"plant-%i\" % (created_plant.id),\n \"common_name\": created_plant.common_name,\n \"subclass\": created_plant.subclass,\n \"order\": created_plant.order,\n \"family\": created_plant.family,\n \"genus\": created_plant.genus,\n \"species\": created_plant.species,\n \"leaves\": []\n }\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertJSONEqual(response.content, json.dumps(expected_data))",
"def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)",
"def test_speciesDestruction():\n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n sys.addSpecies('fox')\n sys.addSpecies('wolf')\n \n sys.removeSpecies('fox')\n \n assert len(sys.species_list) == 2\n assert not ('fox' in sys.species_list)\n for key in sys.intMatrix:\n assert not ('fox' in key)\n \n sys.removeSpecies('wolf')\n \n assert sys.species_list == ['rabbit']\n for key in sys.intMatrix:\n assert not ('wolf' in key)\n \n sys.removeSpecies('rabbit') \n \n assert sys.intMatrix == {}\n assert sys.species_list == []\n for key in sys.intMatrix:\n assert not ('rabbit' in key)",
"def speciate(self):\n\n\n # Clear out the previous generation\n for spec in self.species.values():\n spec.champ = spec.get_champion()\n spec.flush()\n\n for genome in self.all_genomes:\n if genome.species_hint is not None:\n spec = self.species.get(genome.species_hint)\n if spec and spec.is_compatible(genome):\n spec.add_genome(genome)\n continue\n\n for spec in self.species.values():\n # check compatibility until found\n if spec.is_compatible(genome):\n spec.add_genome(genome)\n break\n else: # make a new spec\n spec_num = self.get_next_species_num()\n spec = Species(self, spec_num)\n spec.add_genome(genome)\n spec.champ = genome\n self.species[spec_num] = spec\n\n # Delete unnecessary species\n for spec_num, spec in list(self.species.items()):\n if len(spec)==0:\n self.species.pop(spec_num)",
"def test_create_flavor_existing(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))\n\n flavor_creator_2 = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor2 = flavor_creator_2.create()\n\n self.assertEqual(flavor.id, flavor2.id)",
"def test_createperson(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p.id, p2.id)\n self.assertEqual(p.firstname, p2.firstname)\n self.assertEqual(p.lastname, p2.lastname)\n self.assertEqual(p.email, p2.email)\n self.assertEqual(p.hobbies, p2.hobbies)",
"def speciate_genomes(self):\n for genome in self.genomes:\n species_found = False\n\n for species in self.species:\n compatibility = genome.compatibility(species.leader)\n\n if compatibility < self.config.compatibility_threshold:\n species.add_genome(genome)\n species_found = True\n break\n\n if not species_found:\n new_species = Species(self.species_id, genome, self.config, self.breeder)\n self.species.append(new_species)\n self.species_id += 1",
"def test_add_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertNotIn(b1, s1.catalogue)\n s1.add_resource(b1)\n self.assertIn(b1, s1.catalogue)\n s1.add_resource(b1)\n self.assertEqual(len(s1.catalogue), 1)",
"def test_manufacturers_created(self):\n # Currently, there is just 1 Organization in the database, the org_existing\n org_existing = OrganizationFactory(name='Existing Organization')\n self.assertEqual(Organization.objects.count(), 1)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The CSV file mentions 3 manufacturers, 1 for the project_ouessant1, and\n # 2 for project_liaoning\n manufacturer1 = Organization.objects.get(name='Manufacturer One')\n manufacturer_vestas = Organization.objects.get(name='Vestas Wind Systems A/S')\n self.assertEqual(set(project_ouessant1.manufacturers.all()), set([org_existing]))\n self.assertEqual(\n set(project_liaoning.manufacturers.all()),\n set([manufacturer1, manufacturer_vestas])\n )\n self.assertEqual(project_ouessant2.manufacturers.count(), 0)",
"def test_gear_create(self):\n gear_cam = Gear.objects.get(name='Cam')\n gear_nut = Gear.objects.get(name='Nut')\n self.assertEquals(\n gear_cam,\n Gear(id=1, name='Cam', desc='A cam', brand='OnlyCams', weight_grams='1',\n length_mm='1', width_mm='1', depth_mm='1', locking=False, owner=None)\n )\n self.assertEquals(\n gear_nut,\n Gear(id=2, name='Nut', desc='A Nut', brand='OnlyNuts', weight_grams='1',\n length_mm='1', width_mm='1', depth_mm='1', locking=False, owner=None)\n )",
"def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))",
"def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))",
"def test_create_service_with_duplicate_data(self):\n first_response = self.tenant_client.create_service(\n name=self.name,\n type_=self.type,\n description=self.description)\n first_service = first_response.entity\n self.assertEqual(first_response.status_code, 200)\n self.addCleanup(self.tenant_client.delete_service, first_service.id_)\n second_response = self.tenant_client.create_service(\n name=self.name,\n type_=self.type,\n description=self.description)\n second_service = second_response.entity\n self.assertEqual(second_response.status_code, 200)\n self.addCleanup(self.tenant_client.delete_service, second_service.id_)\n self.assertNotEqual(first_service, second_service)\n self.assertEqual(second_service.name, first_service.name)\n self.assertEqual(second_service.type_, first_service.type_)\n self.assertEqual(first_service.description,\n second_service.description)",
"def test_verify_enterprise_reinit(self):\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites()\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n vlan_1 = 42\n\n esi_id = '00:11:22:33:44:55:66:77:88:99'\n vlan_1 = 42\n vlan_2 = '4094'\n vlan_3 = 4093\n pi_name = self.id() + '_physical_interface1'\n pi = PhysicalInterface(name=pi_name,\n parent_obj=pr_obj,\n ethernet_segment_identifier=esi_id)\n pi_uuid = self._vnc_lib.physical_interface_create(pi)\n pi_obj = self._vnc_lib.physical_interface_read(id=pi_uuid)\n\n fabric_name = fabric_obj.get_fq_name()\n pi_fq_name = pi_obj.get_fq_name()\n\n # Create VPG\n vpg_name = \"vpg-1\"\n vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj)\n vpg_uuid = self.api.virtual_port_group_create(vpg)\n vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_uuid)\n vpg_name = vpg_obj.get_fq_name()\n\n # Create single VN\n vn1 = VirtualNetwork('vn1-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn1)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn1\n vmi_obj_1 = VirtualMachineInterface(self.id() + \"1\",\n parent_obj=proj_obj)\n vmi_obj_1.set_virtual_network(vn1)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name)\n\n vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_1.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(\n sub_interface_vlan_tag=vlan_1))\n vmi_uuid_1 = self.api.virtual_machine_interface_create(vmi_obj_1)\n vpg_obj.add_virtual_machine_interface(vmi_obj_1)\n self.api.virtual_port_group_update(vpg_obj)\n\n mock_zk = self._api_server._db_conn._zk_db\n # Verify if Znode are created for VMI1\n tagged_validation_node1 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'virtual-network:%s' % vn1.uuid)\n znode_vlan_1_id = mock_zk._zk_client.read_node(\n tagged_validation_node1)\n validation_node1 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'vlan:%s' % znode_vlan_1_id)\n\n # Read Znode\n znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)\n # Verify if correct Znodes are created\n assert znode_vmi_1_uuid == vmi_uuid_1, \\\n \"Znode for VMI_1 (%s) doesn't exist\" % vmi_uuid_1\n\n # Attach Second VMI with untagged vlan\n vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn2)\n\n # Create first untagged VMI and attach it to Virtual Port Group\n vmi_obj_2 = VirtualMachineInterface(self.id() + \"2\",\n parent_obj=proj_obj)\n vmi_obj_2.set_virtual_network(vn2)\n\n # Create KV_Pairs for this VMI with an untagged VLAN\n # If tor_port_vlan_id is set, then it signifies a untagged VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name,\n tor_port_vlan_id=vlan_2)\n\n vmi_obj_2.set_virtual_machine_interface_bindings(kv_pairs)\n vmi_uuid_2 = self.api.virtual_machine_interface_create(vmi_obj_2)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.add_virtual_machine_interface(vmi_obj_2)\n self.api.virtual_port_group_update(vpg_obj)\n\n validation_node2 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'untagged')\n\n # Read Znode\n znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)\n # Verify if correct Znodes are created\n assert znode_vmi_2_uuid == vmi_uuid_2, \\\n \"Znode for VMI_2 (%s) doesn't exist\" % vmi_uuid_2\n\n 
# Create another third VN with second tagged VMI\n vn3 = VirtualNetwork('vn3-%s' % (self.id()), parent_obj=proj_obj)\n self.api.virtual_network_create(vn3)\n\n # Create a VMI that's attached to vpg-1 and having reference\n # to vn3\n vmi_obj_3 = VirtualMachineInterface(self.id() + \"3\",\n parent_obj=proj_obj)\n vmi_obj_3.set_virtual_network(vn3)\n\n # Create KV_Pairs for this VMI\n kv_pairs = self._create_kv_pairs(pi_fq_name,\n fabric_name,\n vpg_name)\n\n vmi_obj_3.set_virtual_machine_interface_bindings(kv_pairs)\n\n vmi_obj_3.set_virtual_machine_interface_properties(\n VirtualMachineInterfacePropertiesType(\n sub_interface_vlan_tag=vlan_3))\n vmi_uuid_3 = self.api.virtual_machine_interface_create(vmi_obj_3)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n vpg_obj.add_virtual_machine_interface(vmi_obj_3)\n self.api.virtual_port_group_update(vpg_obj)\n vpg_obj = self.api.virtual_port_group_read(id=vpg_obj.uuid)\n\n tagged_validation_node3 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'virtual-network:%s' % vn3.uuid)\n znode_vlan_3_id = mock_zk._zk_client.read_node(\n tagged_validation_node3)\n validation_node3 = os.path.join(\n _DEFAULT_ZK_FABRIC_ENTERPRISE_PATH_PREFIX,\n 'virtual-port-group:%s' % vpg_obj.uuid,\n 'vlan:%s' % znode_vlan_3_id)\n\n # Read Znode\n znode_vmi_3_uuid = mock_zk._zk_client.read_node(validation_node3)\n # Verify if correct Znodes are created\n assert znode_vmi_3_uuid == vmi_uuid_3, \\\n \"Znode for VMI_3 (%s) doesn't exist\" % vmi_uuid_3\n\n # Delete all Znodes for VMI1, VMI2, VMI3\n mock_zk._zk_client.delete_node(validation_node1, True)\n mock_zk._zk_client.delete_node(validation_node2, True)\n mock_zk._zk_client.delete_node(validation_node3, True)\n\n # manually setting contrail_version to 21.4\n # so db_resync is run as part of upgrade scenario\n self._api_server._args.contrail_version = '21.4'\n\n self._api_server._db_conn._db_resync_done.clear()\n # API server DB reinit\n self._api_server._db_init_entries()\n self._api_server._db_conn.wait_for_resync_done()\n\n # Verify if Znodes are added back\n znode_vmi_1_uuid = mock_zk._zk_client.read_node(validation_node1)\n znode_vmi_2_uuid = mock_zk._zk_client.read_node(validation_node2)\n znode_vmi_3_uuid = mock_zk._zk_client.read_node(validation_node3)\n\n # Verify if correct Znodes are created\n assert znode_vmi_1_uuid == vmi_uuid_1, \\\n \"Znode for VMI_1 (%s) doesn't exist\" % vmi_uuid_1\n assert znode_vmi_2_uuid == vmi_uuid_2, \\\n \"Znode for VMI_2 (%s) doesn't exist\" % vmi_uuid_2\n assert znode_vmi_3_uuid == vmi_uuid_3, \\\n \"Znode for VMI_3 (%s) doesn't exist\" % vmi_uuid_3\n\n # Delete VMIs from VPG\n self.api.virtual_machine_interface_delete(id=vmi_uuid_1)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_2)\n self.api.virtual_machine_interface_delete(id=vmi_uuid_3)\n self.api.virtual_port_group_delete(id=vpg_obj.uuid)\n self.api.physical_interface_delete(id=pi_uuid)\n self.api.physical_router_delete(id=pr_obj.uuid)\n self.api.fabric_delete(id=fabric_obj.uuid)\n # adding back zknode to original version\n # so other test cases runs from the begining\n mock_zk._zk_client.update_node(PATH_SYNC, '2011')",
"def testaddElectronicDevices(self):\r\n device1 = ElectronicDevice()\r\n device1.setDeviceType('Kindle')\r\n device1.setLocationDevice('F2R4S12P1') #floor 2, row 4, shelf 12, position 1\r\n device1.setDeviceAvailability(True)\r\n \r\n device2 = ElectronicDevice()\r\n device2.setDeviceType('Tablet')\r\n device2.setLocationDevice('F2R4S12P2') #floor 2, row 4, shelf 12, position 2\r\n device2.setDeviceAvailability(False) \r\n \r\n ebook1 = ElectronicResources()\r\n ebook1.setListDevices([device1])\r\n ebook1.setISBN(9780316485616)\r\n ebook1.setEBookTitle('The Night Fire')\r\n ebook1.setEBookAuthor('Harry Bosch')\r\n ebook1.addElectronicDevices(device2)\r\n \r\n #unit test for console print from StackOverflow\r\n output = io.StringIO() # Create StringIO object\r\n sys.stdout = output # and redirect stdout.\r\n ebook1.addElectronicDevices(device1)\r\n print ('Captured', output.getvalue())",
"def test_owners_ownerstakes_created(self):\n org_existing = OrganizationFactory(name='Existing Organization')\n # Currently, there is 1 owner (Organization) and 0 OwnerStakes objects\n # in the database\n self.assertEqual(Organization.objects.count(), 1)\n self.assertEqual(PlantOwnerStake.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # The OwnerStakes have been assigned to the correct PowerPlants\n self.assertEqual(PlantOwnerStake.objects.count(), 4)\n owner_sabella = Organization.objects.get(name='Sabella SAS')\n owner_ppc = Organization.objects.get(name='Public Power Corporation SA')\n owner_stake_ouessant1 = PlantOwnerStake.objects.get(\n owner=owner_sabella,\n power_plant=powerplant_ouessant\n )\n self.assertEqual(owner_stake_ouessant1.percent_owned, 50)\n owner_stake_ouessant2 = PlantOwnerStake.objects.get(\n owner=org_existing,\n power_plant=powerplant_ouessant\n )\n self.assertEqual(owner_stake_ouessant2.percent_owned, 30)\n owner_stake_ilarionas1 = PlantOwnerStake.objects.get(\n owner=owner_ppc,\n power_plant=powerplant_ilarionas\n )\n self.assertIsNone(owner_stake_ilarionas1.percent_owned)\n owner_stake_ilarionas2 = PlantOwnerStake.objects.get(\n owner=org_existing,\n power_plant=powerplant_ilarionas\n )\n self.assertIsNone(owner_stake_ilarionas2.percent_owned)\n self.assertEqual(\n set(powerplant_ouessant.plant_owner_stakes.all()),\n set([owner_stake_ouessant1, owner_stake_ouessant2])\n )\n self.assertEqual(\n set(powerplant_ilarionas.plant_owner_stakes.all()),\n set([owner_stake_ilarionas1, owner_stake_ilarionas2])\n )\n self.assertEqual(powerplant_tonstad.plant_owner_stakes.count(), 0)",
"def test_0_0_create(self):\n\n self.assertTrue(self.b1)",
"def test_spawn(self):\n self.grid.spawn()\n\n self.assertEqual(xyzroom.XYZRoom.objects.all().count(), 6)\n self.assertEqual(xyzroom.XYZExit.objects.all().count(), 10)\n\n room1 = xyzroom.XYZRoom.objects.get_xyz(xyz=(0, 1, \"map12a\"))\n room2 = xyzroom.XYZRoom.objects.get_xyz(xyz=(1, 0, \"map12b\"))\n east_exit = [exi for exi in room1.exits if exi.db_key == \"east\"][0]\n west_exit = [exi for exi in room2.exits if exi.db_key == \"west\"][0]\n\n # make sure exits traverse the maps\n self.assertEqual(east_exit.db_destination, room2)\n self.assertEqual(west_exit.db_destination, room1)",
"def test_init_spouse(self):\n # Add a spouse and confirm that both Person objects are updated\n person1 = self.owner\n person2 = Person(\n self.initial_year, \"Spouse\", self.initial_year - 20,\n retirement_date=self.retirement_date,\n gross_income=Money(50000),\n spouse=person1, tax_treatment=self.tax_treatment)\n self.assertEqual(person1.spouse, person2)\n self.assertEqual(person2.spouse, person1)",
"def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)"
]
| [
"0.68667287",
"0.656023",
"0.61642134",
"0.60768485",
"0.6068856",
"0.6068856",
"0.59459317",
"0.5942943",
"0.58677506",
"0.5831742",
"0.58130676",
"0.5796804",
"0.5780882",
"0.57244974",
"0.5724118",
"0.5683249",
"0.56828576",
"0.56735647",
"0.5658161",
"0.5650394",
"0.559248",
"0.5590907",
"0.55169153",
"0.55168265",
"0.55133915",
"0.54913247",
"0.54869205",
"0.54607064",
"0.54550725",
"0.5436894"
]
| 0.68553 | 1 |
Create and destroy three species and check that at every step the Ecosystem is correctly updated. | def test_speciesDestruction():
sys = LVsystem.Ecosystem()
sys.addSpecies('rabbit')
sys.addSpecies('fox')
sys.addSpecies('wolf')
sys.removeSpecies('fox')
assert len(sys.species_list) == 2
assert not ('fox' in sys.species_list)
for key in sys.intMatrix:
assert not ('fox' in key)
sys.removeSpecies('wolf')
assert sys.species_list == ['rabbit']
for key in sys.intMatrix:
assert not ('wolf' in key)
sys.removeSpecies('rabbit')
assert sys.intMatrix == {}
assert sys.species_list == []
for key in sys.intMatrix:
assert not ('rabbit' in key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_speciesCreation():\n \n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n sys.addSpecies('fox')\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInitialCond('rabbit', 10)\n sys.setInitialCond('fox', 5)\n sys.setGrowthRate('rabbit', 1)\n sys.setGrowthRate('fox', -1)\n sys.setCarrCap('rabbit', 10000)\n sys.setCarrCap('fox', 10000)\n sys.setChangeRate('rabbit', 10)\n sys.setChangeRate('fox', 20) \n \n assert len(sys.species_list) == 2\n assert sys.species_list == ['rabbit','fox']\n assert sys.intMatrix == {('rabbit','fox'):-1, ('fox','rabbit'):1}\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')",
"def testMakeNewSpecies(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]')]\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs)) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]'),\n Species().fromSMILES('CC')]#duplicate species\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs) - 1)",
"def test_make_new_species(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]')]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs))\n self.assertEquals(len(cerm.index_species_dict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('CC')] # duplicate species\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs) - 1)\n self.assertEquals(len(cerm.index_species_dict), len(spcs) - 1)",
"def speciate(self):\n\n\n # Clear out the previous generation\n for spec in self.species.values():\n spec.champ = spec.get_champion()\n spec.flush()\n\n for genome in self.all_genomes:\n if genome.species_hint is not None:\n spec = self.species.get(genome.species_hint)\n if spec and spec.is_compatible(genome):\n spec.add_genome(genome)\n continue\n\n for spec in self.species.values():\n # check compatibility until found\n if spec.is_compatible(genome):\n spec.add_genome(genome)\n break\n else: # make a new spec\n spec_num = self.get_next_species_num()\n spec = Species(self, spec_num)\n spec.add_genome(genome)\n spec.champ = genome\n self.species[spec_num] = spec\n\n # Delete unnecessary species\n for spec_num, spec in list(self.species.items()):\n if len(spec)==0:\n self.species.pop(spec_num)",
"def test_species(self):\n spec = input.species('C2H4', os.path.join(self.directory, 'species', 'C2H4', 'ethene.py'))\n self.assertTrue(isinstance(spec, Species))\n self.assertEqual(len(spec.molecule), 0)",
"def test_sg_delete_create_same(self):\n\n # Add a faked storage_group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n storage_group_name = faked_storage_group.name\n self.add_storage_group2()\n\n # Construct the input properties for a third storage_group\n sg3_props = copy.deepcopy(faked_storage_group.properties)\n sg3_props['description'] = 'Third storage_group'\n\n storage_group_mgr = self.console.storage_groups\n storage_group = storage_group_mgr.find(name=storage_group_name)\n\n # Execute the deletion code to be tested.\n storage_group.delete()\n\n # Check that the storage_group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=storage_group_name)\n\n # Execute the creation code to be tested.\n storage_group_mgr.create(sg3_props)\n\n # Check that the storage_group exists again under that name\n storage_group3 = storage_group_mgr.find(name=storage_group_name)\n description = storage_group3.get_property('description')\n assert description == 'Third storage_group'",
"def test_enlarge_2_add_reactive_species(self):\n m1 = Molecule(smiles='CC')\n spc1 = self.rmg.reaction_model.make_new_species(m1, label='C2H4')[0]\n self.rmg.reaction_model.enlarge(spc1)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 2)\n self.assertTrue(self.rmg.reaction_model.core.species[1].reactive)\n\n m2 = Molecule(smiles='[CH3]')\n spc2 = self.rmg.reaction_model.make_new_species(m2, label='CH3')[0]\n self.rmg.reaction_model.enlarge(spc2)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 3)\n self.assertTrue(self.rmg.reaction_model.core.species[2].reactive)",
"def test_createData():\n\n sys = LVsystem.Ecosystem()\n\n sys.addSpecies('rabbit')\n sys.setInteraction('rabbit', 'hen', 0)\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInitialCond('rabbit', 30)\n sys.setGrowthRate('rabbit', 0.09)\n sys.setCarrCap('rabbit', 10000)\n sys.setChangeRate('rabbit', 400)\n\n sys.addSpecies('hen')\n sys.setInteraction('hen', 'rabbit', 0)\n sys.setInteraction('hen', 'fox', -1)\n sys.setInitialCond('hen', 10)\n sys.setGrowthRate('hen', 0.07)\n sys.setCarrCap('hen', 10000)\n sys.setChangeRate('hen', 500)\n\n sys.addSpecies('fox')\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInteraction('fox', 'hen', 1)\n sys.setInitialCond('fox', 20)\n sys.setGrowthRate('fox', -0.06)\n sys.setCarrCap('fox', 1)\n sys.setChangeRate('fox', 250)\n\n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n \n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')",
"def test_setup_name_smiles_openeye_charges():\n with mmtools.utils.temporary_directory() as tmp_dir:\n molecules_ids = ['toluene-smiles', 'p-xylene-name']\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n exp_builder = ExperimentBuilder(yaml_content)\n exp_builder._db._setup_molecules(*molecules_ids)\n\n for mol in molecules_ids:\n output_dir = exp_builder._db.get_molecule_dir(mol)\n output_basepath = os.path.join(output_dir, mol)\n\n # Check that all the files have been created\n assert os.path.exists(output_basepath + '.mol2')\n assert os.path.exists(output_basepath + '.gaff.mol2')\n assert os.path.exists(output_basepath + '.frcmod')\n assert os.path.getsize(output_basepath + '.mol2') > 0\n assert os.path.getsize(output_basepath + '.gaff.mol2') > 0\n assert os.path.getsize(output_basepath + '.frcmod') > 0\n\n atoms_frame, _ = mdtraj.formats.mol2.mol2_to_dataframes(output_basepath + '.mol2')\n input_charges = atoms_frame['charge']\n atoms_frame, _ = mdtraj.formats.mol2.mol2_to_dataframes(output_basepath + '.gaff.mol2')\n output_charges = atoms_frame['charge']\n\n # With openeye:am1bcc charges, the final charges should be unaltered\n if mol == 'p-xylene-name':\n assert input_charges.equals(output_charges)\n else: # With antechamber, sqm should alter the charges a little\n assert not input_charges.equals(output_charges)\n\n # Check that molecules are resumed correctly\n exp_builder = ExperimentBuilder(yaml_content)\n exp_builder._db._setup_molecules(*molecules_ids)",
"def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))",
"def test_multiplesquarecreation(self):\n Square.reset_objects()\n s1 = Square(10)\n s2 = Square(2)\n s3 = Square(3)\n self.assertEqual(s1.id, 1)\n self.assertEqual(s2.id, 2)\n self.assertEqual(s3.id, 3)",
"def test_instance_type_create_then_delete(self):\n name = 'Small Flavor'\n flavorid = 'flavor1'\n\n original_list = instance_types.get_all_types()\n\n # create new type and make sure values stick\n inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)\n inst_type_id = inst_type['id']\n self.assertEqual(inst_type['flavorid'], flavorid)\n self.assertEqual(inst_type['name'], name)\n self.assertEqual(inst_type['memory_mb'], 256)\n self.assertEqual(inst_type['vcpus'], 1)\n self.assertEqual(inst_type['root_gb'], 120)\n self.assertEqual(inst_type['ephemeral_gb'], 100)\n self.assertEqual(inst_type['swap'], 0)\n self.assertEqual(inst_type['rxtx_factor'], 1)\n\n # make sure new type shows up in list\n new_list = instance_types.get_all_types()\n self.assertNotEqual(len(original_list), len(new_list),\n 'instance type was not created')\n\n instance_types.destroy(name)\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, inst_type_id)\n\n # deleted instance should not be in list anymoer\n new_list = instance_types.get_all_types()\n self.assertEqual(original_list, new_list)",
"def setUp(self):\n Pet.remove_all()",
"def test_enlarge_1_add_nonreactive_species(self):\n m0 = Molecule(smiles='[He]')\n spc0 = self.rmg.reaction_model.make_new_species(m0, label='He', reactive=False)[0]\n self.rmg.reaction_model.enlarge(spc0)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 1)\n self.assertFalse(self.rmg.reaction_model.core.species[0].reactive)",
"def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))",
"def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))",
"def test_append_unreactive_structure(self):\n\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('CCO'), # a control species\n Species().from_smiles('[N]=O'),\n Species().from_adjacency_list(\"\"\"1 O u1 p2 c0 {2,S}\n 2 N u0 p2 c0 {1,S}\"\"\"), # a non-representative structure of '[N]=O'\n ]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict[1].molecule), 1)\n self.assertTrue(cerm.index_species_dict[1].molecule[0].reactive)\n self.assertEquals(len(cerm.index_species_dict[2].molecule), 1)\n self.assertTrue(cerm.index_species_dict[2].molecule[0].reactive)",
"def test_remove_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.remove_resource(b1), print())\n s1.add_resource(b1)\n self.assertIn(b1, s1.catalogue)\n s1.remove_resource(b1)\n self.assertNotIn(b1, s1.catalogue)",
"def load_individual_species():\n\n print (\"individual species\")\n\n SpeciesIndividual.query.delete()\n\n with open(\"seed_data/species_seed.psv\") as species:\n for row in species:\n species_name, group_id = row.strip().split(\"|\")\n\n species = SpeciesIndividual(species_name=species_name,\n species_group_id=group_id)\n\n db.session.add(species)\n\n db.session.commit()",
"def destroy(self, cause:str, *, warp_core_breach:bool=False, self_destruct:bool=False):\n gd = self.game_data\n #gd.grid[self.sector_coords.y][self.sector_coords.x].removeShipFromSec(self)\n is_controllable = self.is_controllable\n #wc_value = self.sys_warp_core.get_effective_value\n\n if self.is_controllable:\n self.game_data.cause_of_damage = cause\n try:\n self.life_support.able_crew = 0\n self.life_support.injured_crew = 0\n except AttributeError:\n pass\n try:\n for k in self.torpedo_launcher.torps.keys():\n self.torpedo_launcher.torps[k] = 0\n self.torpedo_launcher.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.shield_generator.shields = 0\n self.shield_generator.shields_up = False\n self.shield_generator.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.polarized_hull.polarization_amount = 0\n self.polarized_hull.is_polarized = False\n self.polarized_hull.integrety = 0.0\n except AttributeError:\n pass\n self.power_generator.energy = 0\n self.power_generator.integrety = 0\n try:\n self.warp_drive.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.beam_array.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.cannons.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.impulse_engine.integrety = 0.0\n except AttributeError:\n pass\n self.sensors.integrety = 0.0\n try:\n self.cloak.cloak_status = CloakStatus.INACTIVE\n self.cloak.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.transporter.integrety = 0.0\n except AttributeError:\n pass\n\n if is_controllable:\n gd.engine.message_log.print_messages = False\n\n if warp_core_breach or self_destruct:\n \n self.warp_core_breach(self_destruct)\n self.hull = -self.ship_class.max_hull\n \n if self is self.game_data.selected_ship_planet_or_star:\n self.game_data.selected_ship_planet_or_star = None\n \n self.get_sub_sector.destroy_ship(self)",
"def test_create_delete(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"rm %s/one\" % (self.tests_path))\n self.shell.onecmd(\"exists %s/one\" % (self.tests_path))\n self.assertEqual(\"Path %s/one doesn't exist\\n\" % (\n self.tests_path), self.output.getvalue())",
"def test_create_system_entire(self):\n pass",
"def update_species_frames(self):\n pass",
"def update_species(self):\n warn(f\"Default Update Species Called for Mechanism = {self.name}.\")\n return []",
"async def test_deleting_entity(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n client,\n sensor_entities,\n climate_entities,\n) -> None:\n data = {**sensor_entities, **climate_entities}\n body = await generate_latest_metrics(client)\n\n assert (\n 'sensor_temperature_celsius{domain=\"sensor\",'\n 'entity=\"sensor.outside_temperature\",'\n 'friendly_name=\"Outside Temperature\"} 15.6' in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.outside_temperature\",'\n 'friendly_name=\"Outside Temperature\"} 1.0' in body\n )\n\n assert (\n 'sensor_humidity_percent{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 54.0' in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 1.0' in body\n )\n\n assert (\n 'climate_action{action=\"heating\",'\n 'domain=\"climate\",'\n 'entity=\"climate.heatpump\",'\n 'friendly_name=\"HeatPump\"} 1.0' in body\n )\n\n assert (\n 'climate_action{action=\"cooling\",'\n 'domain=\"climate\",'\n 'entity=\"climate.heatpump\",'\n 'friendly_name=\"HeatPump\"} 0.0' in body\n )\n\n assert \"sensor.outside_temperature\" in entity_registry.entities\n assert \"climate.heatpump\" in entity_registry.entities\n entity_registry.async_remove(data[\"sensor_1\"].entity_id)\n entity_registry.async_remove(data[\"climate_1\"].entity_id)\n\n await hass.async_block_till_done()\n body = await generate_latest_metrics(client)\n\n # Check if old metrics deleted\n body_line = \"\\n\".join(body)\n assert 'entity=\"sensor.outside_temperature\"' not in body_line\n assert 'friendly_name=\"Outside Temperature\"' not in body_line\n assert 'entity=\"climate.heatpump\"' not in body_line\n assert 'friendly_name=\"HeatPump\"' not in body_line\n\n # Keep other sensors\n assert (\n 'sensor_humidity_percent{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 54.0' in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 1.0' in body\n )",
"def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)",
"def cleanUp(self):\r\n for group in self._groups.values():\r\n group.destroy()\r\n\r\n assert len(self._groups) == 0\r\n\r\n for machine in self._machines.copy():\r\n self.destroyMachine(machine)\r\n\r\n assert len(self._machines) == 0\r\n\r\n self.unregisterIAASHook()",
"def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1",
"def test_remove_herbivore():\n savanna_cell = topo.Savanna()\n test_herbivore = animals.Herbivores()\n savanna_cell.add_animal(test_herbivore)\n assert test_herbivore in savanna_cell.herbivore_list\n assert test_herbivore in animals.Animals.instances\n savanna_cell.remove_animal(test_herbivore)\n assert test_herbivore not in savanna_cell.herbivore_list\n assert test_herbivore in animals.Animals.instances\n animals.Animals.instances.remove(test_herbivore)\n assert test_herbivore not in animals.Animals.instances",
"def test_func(x):\n for i in range(32):\n handle = self.model_manager.create(name='%s-%s' % (x, i))\n self.assertTrue(\n handle in [m.handle for m in self.model_manager.models()])\n self.model_manager.delete(handle)\n self.assertTrue(\n handle not in\n [m.handle for m in self.model_manager.models()])\n return True"
]
| [
"0.7024433",
"0.6819262",
"0.65983313",
"0.5896822",
"0.5891256",
"0.58759683",
"0.580056",
"0.5736429",
"0.55829257",
"0.5575694",
"0.55605096",
"0.55312914",
"0.5522494",
"0.55098146",
"0.5448845",
"0.5448845",
"0.5442072",
"0.5338611",
"0.53373635",
"0.53349733",
"0.53340185",
"0.5305964",
"0.53014356",
"0.53000873",
"0.5298578",
"0.5284039",
"0.52804965",
"0.5263787",
"0.52565736",
"0.52512443"
]
| 0.7159826 | 0 |
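The destruction test above exercises only the add/remove bookkeeping of LVsystem.Ecosystem: species_list keeps the surviving names and intMatrix drops every pair that mentions a removed species. As a point of reference, here is a minimal, hypothetical sketch of that bookkeeping; the class name MiniEcosystem and its internals are assumptions for illustration, and the real LVsystem class also stores initial conditions, growth rates, carrying capacities and change rates.

class MiniEcosystem:
    # Sketch of the species/interaction bookkeeping the destruction test relies on.
    def __init__(self):
        self.species_list = []
        self.intMatrix = {}

    def addSpecies(self, name):
        if name not in self.species_list:
            self.species_list.append(name)

    def setInteraction(self, species_a, species_b, value):
        self.intMatrix[(species_a, species_b)] = value

    def removeSpecies(self, name):
        self.species_list.remove(name)
        # Drop every interaction pair that mentions the removed species.
        self.intMatrix = {pair: v for pair, v in self.intMatrix.items() if name not in pair}


if __name__ == "__main__":
    eco = MiniEcosystem()
    for s in ("rabbit", "fox", "wolf"):
        eco.addSpecies(s)
    eco.setInteraction("rabbit", "fox", -1)
    eco.setInteraction("fox", "rabbit", 1)
    eco.removeSpecies("fox")
    assert eco.species_list == ["rabbit", "wolf"]
    assert all("fox" not in pair for pair in eco.intMatrix)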
Tests that the create_data method of the Ecosystem class correctly returns the stored data. | def test_createData():
sys = LVsystem.Ecosystem()
sys.addSpecies('rabbit')
sys.setInteraction('rabbit', 'hen', 0)
sys.setInteraction('rabbit', 'fox', -1)
sys.setInitialCond('rabbit', 30)
sys.setGrowthRate('rabbit', 0.09)
sys.setCarrCap('rabbit', 10000)
sys.setChangeRate('rabbit', 400)
sys.addSpecies('hen')
sys.setInteraction('hen', 'rabbit', 0)
sys.setInteraction('hen', 'fox', -1)
sys.setInitialCond('hen', 10)
sys.setGrowthRate('hen', 0.07)
sys.setCarrCap('hen', 10000)
sys.setChangeRate('hen', 500)
sys.addSpecies('fox')
sys.setInteraction('fox', 'rabbit', 1)
sys.setInteraction('fox', 'hen', 1)
sys.setInitialCond('fox', 20)
sys.setGrowthRate('fox', -0.06)
sys.setCarrCap('fox', 1)
sys.setChangeRate('fox', 250)
data = sys.create_data()
assert data[0] == 3
assert data[1] == ['rabbit', 'hen', 'fox']
assert data[2] == [30,10,20]
assert data[3] == [0.09,0.07,-0.06]
assert data[4] == [10000,10000,1]
assert data[5] == [400,500,250]
assert data[6][1][2] == -data[6][2][1]
assert data[6][2][2] == 0
sys.removeSpecies('rabbit')
sys.removeSpecies('fox')
sys.removeSpecies('hen') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_device_data(self):\n pass",
"def test_creation(self):\n self.assertEqual(self.book_data, self.reader._books)\n self.assertEqual(1, self.reader._location)\n self.assertEqual([0, 0, 0, 0, 0, 0], self.reader._timing)",
"def create(self, data):\n raise NotImplementedError",
"def test_create_data(self):\n process = Process.objects.filter(slug=\"test-min\").latest()\n data = Data.objects.create(\n name=\"Test data\",\n contributor=self.contributor,\n process=process,\n )\n\n data.refresh_from_db()\n self.assertEqual(data.status, Data.STATUS_DONE)",
"def test_data_object_vaporise(self):\n pass",
"def test_creates_data(self, config_filename, expected_class):\n data = create_data(read_config_file(config_filename))\n self.assertIsInstance(data, expected_class)",
"def test_insert_data(self):\n self.engine.insert_data(self.correct_camper_data)\n self.assertDictEqual(\n self.ds.store,\n {\n 3: Camper(**{\n \"id\": 3,\n \"latitude\": 38.7436883,\n \"longitude\": -9.1952226,\n \"price_per_day\": 85.5,\n \"weekly_discount\": 0.25\n })\n })",
"def test_geo_data_created(self):\n # Currently, there are no GeometryStore or PointGeometry objects in the database\n self.assertEqual(GeometryStore.objects.count(), 0)\n self.assertEqual(PointGeometry.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n\n # GeometryStore objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's GeometryStore\n self.assertEqual(GeometryStore.objects.count(), 3)\n # PointGeometry objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's PointGeometry\n self.assertEqual(PointGeometry.objects.count(), 3)\n # The powerplant_ouessant point is correct\n powerplant_ouessant_points = powerplant_ouessant.geo.points.all()\n self.assertEqual(powerplant_ouessant_points.count(), 1)\n self.assertEqual(powerplant_ouessant_points.first().geom.x, -5.11121)\n self.assertEqual(powerplant_ouessant_points.first().geom.y, 48.43754)\n # The powerplant_ilarionas point is correct\n powerplant_ilarionas_points = powerplant_ilarionas.geo.points.all()\n self.assertEqual(powerplant_ilarionas_points.count(), 1)\n self.assertEqual(powerplant_ilarionas_points.first().geom.x, 21.8039)\n self.assertEqual(powerplant_ilarionas_points.first().geom.y, 40.0966)\n # The project_liaoning gets its geodata from its latitude and longitude\n # cells\n project_liaoning_points = project_liaoning.geo.points.all()\n self.assertEqual(project_liaoning_points.count(), 1)\n self.assertEqual(project_liaoning_points.first().geom.x, 121.38065)\n self.assertEqual(project_liaoning_points.first().geom.y, 41.16469)\n # For the project_ouessant1 and project_ouessant2, the latitude and\n # longitude cells are blank, so they get their geodata from their\n # parent PowerPlant (powerplant_ouessant).\n self.assertEqual(project_ouessant1.geo, project_ouessant1.power_plant.geo)\n self.assertEqual(project_ouessant2.geo, project_ouessant2.power_plant.geo)\n # The powerplant_tonstad has no geo data\n self.assertIsNone(powerplant_tonstad.geo)",
"def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()",
"def _validate_create_data(self, data):\n return",
"def test_process_data(self):\n pass",
"def test_data_object_get_details(self):\n pass",
"def fakedata():\n if User.query.filter_by(email='[email protected]').first():\n print ('fake data already generated')\n else:\n generate_test_confs() # load testing confs and tracks\n generate_fake_tickets() # create fake tickets\n generate_test_users() # create named fake users\n # generate_fake_users(100) # create random users\n # add_self_follows() # create self-follows for all users\n generate_fake_papers(100) # create random papers\n generate_fake_reviews() # create random reviews\n generate_fake_transactions() # create fake tickets\n generate_fake_schedule()\n generate_default_addons()",
"def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)",
"def _test_cloud_store_data(self, config, metadata_provider, provider):\n database = MetaDataDB(config)\n database.drop()\n cloud = Cloud(config, metadata_provider, provider, database).connect()\n data1 = file(\"testdata/data1.txt\").read()\n data2 = file(\"testdata/data2.txt\").read()\n data3 = file(\"testdata/data2.txt\").read()\n data4 = file(\"testdata/data2.txt\").read()\n metadata1 = cloud.store(data1, \"testdata/data1.txt\")\n metadata2 = cloud.store(data2, \"testdata/data2.txt\")\n metadata3 = cloud.store(data3, \"testdata/data3.txt\")\n metadata4 = cloud.store(data4, \"testdata/data4.txt\")\n for metadata in cloud.list():\n if metadata[\"key\"] == metadata1[\"key\"]:\n self.assertEqual(\"testdata/data1.txt\", metadata[\"path\"])\n if metadata[\"key\"] == metadata2[\"key\"]:\n self.assertEqual(\"testdata/data2.txt\", metadata[\"path\"])\n if metadata[\"key\"] == metadata3[\"key\"]:\n self.assertEqual(\"testdata/data3.txt\", metadata[\"path\"])\n if metadata[\"key\"] == metadata4[\"key\"]:\n self.assertEqual(\"testdata/data4.txt\", metadata[\"path\"])\n new_data1 = cloud.retrieve(metadata1)\n new_data2 = cloud.retrieve(metadata2)\n new_data3 = cloud.retrieve(metadata3)\n new_data4 = cloud.retrieve(metadata4)\n self.assertEqual(data1, new_data1)\n self.assertEqual(\"testdata/data1.txt\", metadata1[\"path\"])\n self.assertEqual(data2, new_data2)\n self.assertEqual(\"testdata/data2.txt\", metadata2[\"path\"])\n self.assertEqual(data2, new_data3)\n self.assertEqual(\"testdata/data3.txt\", metadata3[\"path\"])\n self.assertEqual(data2, new_data4)\n self.assertEqual(\"testdata/data4.txt\", metadata4[\"path\"])\n cloud.delete(metadata1)\n cloud.delete(metadata2)\n cloud.delete(metadata3)\n cloud.delete(metadata4)\n cloud.disconnect()",
"def test_database_object_can_be_created(self):\n database = generate_database_object()",
"def test_creates_data_collection(self):\n data_collection = create_data_collection(read_config_file(\"test/data_collection.yaml\"))\n self.assertIsInstance(data_collection, DataCollection)",
"def check_for_new_data(self):\n return",
"def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True",
"def test_is_work_data(self):\n self.assertEqual(self.connector.is_work_data(self.work_data), True)\n self.assertEqual(self.connector.is_work_data(self.edition_data), False)",
"def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')",
"def saveData(self):\n pass",
"def test_create_office_class_method(self):\n new_office = Office.create_office(self.office_test_data[\"id\"], self.office_test_data[\"type\"],\n self.office_test_data[\"name\"])\n self.assertDictEqual(new_office, offices[len(offices) - 1])",
"def test_create_office_class_method(self):\n new_office = Office.create_office(self.office_test_data[\"id\"], self.office_test_data[\"type\"],\n self.office_test_data[\"name\"])\n self.assertDictEqual(new_office, offices[len(offices) - 1])",
"def test_alien_data(self):",
"def test_001_save_data(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __test_filename = consts.TEST_FILENAME\n __test_filename_appended = consts.TEST_FILENAME + \"__1\"\n __save_test = __test.save_data(__test_filename, __test_data, True, False)\n self.assertEqual(__save_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n __save_test = __test.save_data(__test_filename, __test_data, False, False)\n self.assertEqual(__save_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n __save_test = __test.save_data(__test_filename, __test_data, True, False)\n self.assertEqual(__save_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n __save_test = __test.save_data(__test_filename, __test_data, False, True)\n self.assertEqual(__save_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n __save_test = __test.save_data(__test_filename, __test_data, False, True)\n self.assertEqual(__save_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n __save_test = __test.save_data(__test_filename_appended, __test_data, False, True)\n self.assertEqual(__save_test, consts.ERROR_CODES[\"SUCCESSFULL\"])",
"def getTestData(self):\n raise NotImplementedError",
"def test_data(self):\n\n # Boolean tests\n is_datas = [True, False]\n for is_data in is_datas:\n self.colorspace.setIsData(is_data)\n self.assertEqual(is_data, self.colorspace.isData())\n\n # Wrong type tests\n wrong_is_datas = [['test'], 'test']\n for wrong_is_data in wrong_is_datas:\n with self.assertRaises(TypeError):\n self.colorspace.setIsData(wrong_is_data)",
"def test_create_system_entire(self):\n pass",
"def test_create(self):\n for citizen in self.citizen_datas:\n # Request the citizen creation.\n body = dict(zip(self.citizen_fields, citizen))\n\n resp = self.client.post(self.url, data=json.dumps(body), content_type='application/json')\n\n # Ensure estatus 201\n assert resp.status_code == HTTPStatus.CREATED, \\\n '{url}: Got {error}, Expected {code}'. \\\n format(url=self.url, error=resp.status_code, code=HTTPStatus.CREATED.value)\n\n resp_body = resp.json()\n # Ensure data created is correct.\n for key in body:\n assert resp_body[key] == body[key], \\\n '{url}: Got field {field} -> {error}, Expected {code}'. \\\n format(url=self.url, field = key, error=resp_body[key], code=body[key])"
]
| [
"0.64928454",
"0.6455364",
"0.6255506",
"0.62456274",
"0.6178591",
"0.6177842",
"0.6141926",
"0.61172384",
"0.6074773",
"0.60663724",
"0.60520685",
"0.6051333",
"0.6007553",
"0.6000438",
"0.598897",
"0.5983702",
"0.59824914",
"0.5970858",
"0.59612536",
"0.59584916",
"0.5956119",
"0.591717",
"0.5904385",
"0.5904385",
"0.5883182",
"0.5872869",
"0.586363",
"0.5842594",
"0.58237755",
"0.58160573"
]
| 0.65844554 | 0 |
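The assertions in the record above pin down the layout of the tuple returned by create_data: index 0 is the species count, 1 the ordered names, 2 the initial conditions, 3 the growth rates, 4 the carrying capacities, 5 the change rates, and 6 an interaction matrix that is antisymmetric with a zero diagonal. The following is a hypothetical sketch of how such a tuple could be assembled from per-species settings; the function and argument names are made up, and the real LVsystem implementation may differ.

def create_data_sketch(species, settings, interactions):
    # species: ordered list of names
    # settings: name -> dict with 'init', 'growth', 'carrcap', 'change'
    # interactions: (a, b) -> coefficient; assumed antisymmetric, zero on the diagonal
    n = len(species)
    matrix = [[0.0] * n for _ in range(n)]
    for i, a in enumerate(species):
        for j, b in enumerate(species):
            if i != j:
                matrix[i][j] = interactions.get((a, b), 0.0)
    return (
        n,
        list(species),
        [settings[s]['init'] for s in species],
        [settings[s]['growth'] for s in species],
        [settings[s]['carrcap'] for s in species],
        [settings[s]['change'] for s in species],
        matrix,
    )


if __name__ == "__main__":
    data = create_data_sketch(
        ['rabbit', 'fox'],
        {'rabbit': {'init': 30, 'growth': 0.09, 'carrcap': 10000, 'change': 400},
         'fox': {'init': 20, 'growth': -0.06, 'carrcap': 1, 'change': 250}},
        {('rabbit', 'fox'): -1, ('fox', 'rabbit'): 1},
    )
    assert data[0] == 2 and data[1] == ['rabbit', 'fox']
    assert data[6][0][1] == -data[6][1][0] and data[6][0][0] == 0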
Decorator used to log the HTTP headers of a request. Used for debugging. Headers are logged at the info level. | def log_headers(f):
def wrapper(fself, *arguments, **keywords):
import logging
for header_key, header_value in fself.request.headers.items():
logging.info(header_key + ": " + header_value)
        # Call the underlying handler unchanged after logging the headers
return f(fself, *arguments, **keywords)
return wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_headers():\n logger.info(request.headers)\n for h in request.headers:\n logger.info(f\"{h[0]}: {h[1]}\")\n return jsonify({k:v for k, v in request.headers.items()})",
"def requestheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def requestheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def requestheaders(self, flow: mitmproxy.http.HTTPFlow):",
"def view_headers():\n\n return jsonify(get_dict('headers'))",
"def log_request(self, code='-', size='-'):\n print self._heading(\"HTTP Request\")\n #First, print the resource identifier and desired operation.\n print self.raw_requestline,\n #Second, print the request metadata\n for header, value in self.headers.items(): \n print header + \":\", value",
"def get_request_headers(self):\n return self.request_headers",
"def view_request_headers(line):\n args = shlex.split(line)\n if not args:\n raise PappyException(\"Request id is required\")\n reqid = args[0]\n\n reqs = yield load_reqlist(reqid)\n for req in reqs:\n if len(reqs) > 1:\n print 'Request %s:' % req.reqid\n view_full_message(req, True)\n if len(reqs) > 1:\n print '-'*30\n print ''",
"def get_request_headers(self):\n return getattr(self.instance, 'request_headers')",
"def log(self):\n\n\t\theader_dict = dict(request.headers)\n\n\t\ttry:\n\t\t\ttracker_id = header_dict[\"tracker_id\"]\n\t\texcept Exception:\n\t\t\ttracker_id = None\n\t\t\n\t\ttry:\n\t\t\tuser_agent = header_dict[\"User-Agent\"]\n\t\texcept Exception:\n\t\t\tuser_agent = None\n\n\t\ttry:\n\t\t\tlanguage = header_dict[\"Accept-Language\"]\n\t\texcept Exception:\n\t\t\tlanguage = None\n\n\t\ttry:\n\t\t\treferer = header_dict[\"Referer\"]\n\t\texcept Exception:\n\t\t\treferer = None\n\n\t\ttry:\n\t\t\torigin = header_dict[\"Origin\"]\n\t\texcept Exception:\n\t\t\torigin = None\n\n\t\ttry:\n\t\t\tjson_data = request.json\n\t\texcept Exception:\n\t\t\tjson_data = None\n\n\t\ttry:\n\t\t\tplatform = request.user_agent.platform.title()\n\t\texcept Exception:\n\t\t\tplatform = None\n\n\t\ttry:\n\t\t\tbrowser = request.user_agent.browser.title()\n\t\texcept Exception:\n\t\t\tbrowser = None\n\n\t\ttry:\n\t\t\tauth_header_token = header_dict[\"Authorization\"].split(\" \")[1]\n\t\texcept Exception:\n\t\t\tauth_header_token = None\n\t\t\n\t\t## If set to run before a request: This is the default setting\n\t\tif self.pre_request:\n\t\t\[email protected]_request()\n\t\t\tdef run():\n\t\t\t\t## If the path accessed is in the do_not_log list, it is skipped\n\t\t\t\tif request.path in self.do_not_log:\n\t\t\t\t\treturn\n\t\t\t\t## If the path accessed is not in the do_not_log list, it is posted\n\t\t\t\telse:\n\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\"status_code\": 200, ## Assumed to be 200 due to the nature of the function\n\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t}\n\n\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\treturn\n\n\t\t\treturn run\n\t\t\n\t\t## If set to as a wrapper to a function\n\t\telse:\n\t\t\tdef log_decorator(func):\n\n\t\t\t\t@wraps(func)\n\t\t\t\tdef execute(*args, **kwargs):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\n\t\t\t\t\t\tresult_response = make_response(result)\n\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": result_response.status_code,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\texcept 
Exception as e:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\t\t\t\t\t\t\n\t\t\t\t\t\ttrace = traceback.format_exc()\n\n\t\t\t\t\t\tkwargs = {\n\t\t\t\t\t\t\t\"trace\": trace,\n\t\t\t\t\t\t\t\"exception\": str(e)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": str(e),\n\t\t\t\t\t\t\t\"stack_trace\": trace,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": 500,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\t\t\t\t\t\n\t\t\t\t\treturn result\n\t\t\t\t\n\t\t\t\treturn execute\n\t\t\t\n\t\t\treturn log_decorator",
"def get_headers(self):\r\n raise NotImplementedError",
"def headers():\n return {\n 'user-agent': 'integration-tester',\n 'content-type': 'application/json',\n }",
"def headers(self):\n return(self.__response.headers)",
"def log_request(self, r):\n\n token = r.headers.get(self.header, None)\n r.token = token\n self.requests.append(r)\n if r.token:\n self.log.debug('[%s] %s', token or '/', r.url)",
"def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HTTPHeaderArgs']]]]:\n return pulumi.get(self, \"http_headers\")",
"def get_headers(self, environ=None):\n return [('Content-Type', 'application/json')]",
"def trace():\n logger.debug('[FLASKWEB /trace] Trace debug request')\n output = {}\n output['args'] = request.args\n output['form'] = request.form\n output['method'] = request.method\n output['url'] = request.url\n output['client_ip'] = request.remote_addr\n output['headers'] = {k: str(v) for k,v in request.headers.items()}\n return jsonify(output), 200",
"def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')",
"def headers(self) -> dict:\n raise NotImplementedError # pragma: no cover",
"def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reason=[{res.reason}]\")",
"def get_headers(self):\n \n return self.headers",
"def get_headers(self):\n return ['dep_red', 'dep_sd', 'hyp_red', 'hyp_sd']",
"def print_header(now):\n global config\n date_time = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')\n\n print('*************************************')\n print(f'HTTP LOGS STATISTICS - {date_time}')",
"def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)",
"def get_request_headers():\n return request.headers.keys()",
"def add_headers():\n # the actual access token -\n g.x_tapis_token = request.headers.get('X-Tapis-Token')\n\n # the tenant associated with the subject of the request; used, for instance, when the subject is different\n # from the subject in the actual access_token (for example, when the access_token represents a service account).\n g.x_tapis_tenant = request.headers.get('X-Tapis-Tenant')\n\n # the user associated with the subject of the request. Similar to x_tapis_tenant, this is used, for instance, when\n # the subject is different from the subject in the actual access_token (for example, when the access_token\n # represents a service account).\n g.x_tapis_user = request.headers.get('X-Tapis-User')\n\n # a hash of the original user's access token. this can be used, for instance, to check if the original user's\n # access token has been revoked.\n g.x_tapis_user_token_hash = request.headers.get('X-Tapis-User-Token-Hash')",
"def generate_header():\n env = getattr(g, 'env', 'unknown')\n return {'X-LOGGLY-TAG': env}",
"def log_request(task_request, request):\n msg = \"{0.method} {0.url}: {0.body}\".format(request)\n log_info(task_request, msg)",
"def test_user_headers_sent_with_request(self):\n user_header = {'All-Request-Headers': 'Headers from user code'}\n req = self.httpbin.get_my_headers(headers=user_header, dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], user_header['All-Request-Headers'])",
"def define_headers(self):\n return {}"
]
| [
"0.7068124",
"0.6656864",
"0.6656864",
"0.6536316",
"0.6233839",
"0.6233012",
"0.6124116",
"0.6074504",
"0.6061556",
"0.60301226",
"0.6003825",
"0.5963785",
"0.5946163",
"0.5938001",
"0.5906835",
"0.5905984",
"0.590182",
"0.589646",
"0.5873743",
"0.5865336",
"0.58603334",
"0.58565676",
"0.58554316",
"0.5852122",
"0.58267504",
"0.5811529",
"0.5798882",
"0.5794287",
"0.57752675",
"0.57601833"
]
| 0.7275658 | 0 |
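The log_headers record above assumes a webapp2-style handler whose self.request.headers behaves like a mapping. Below is a self-contained usage sketch: the decorator is re-declared so the snippet runs on its own, and FakeRequest and Handler are made-up stand-ins for the framework objects.

import logging


def log_headers(f):
    # Same shape as the decorator in the record above: log every request header
    # at info level, then call the wrapped handler unchanged.
    def wrapper(fself, *arguments, **keywords):
        for header_key, header_value in fself.request.headers.items():
            logging.info("%s: %s", header_key, header_value)
        return f(fself, *arguments, **keywords)
    return wrapper


class FakeRequest:
    headers = {"User-Agent": "tests", "Accept": "application/json"}


class Handler:
    request = FakeRequest()

    @log_headers
    def get(self):
        return "ok"


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    print(Handler().get())  # both headers are logged before "ok" is printed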
Decorator that allows an endpoint to use pytracts messages for the request and response. | def endpoint(_wrapped_function=None, lenient=False, **kwargs):
if len(kwargs) > 1:
raise IndexError("Cannot have more than one mapping for request body")
if len(kwargs) == 1:
body_param_name = list(kwargs.keys())[0]
body_param_type = list(kwargs.values())[0]
if not isinstance(body_param_type, messages._MessageClass):
raise TypeError("Body must be of type pytracts.messages.Message")
else:
body_param_name = None
body_param_type = None
def get_wrapper(body_param_name, body_param_type, lenient, f):
def wrapper(self, *arguments, **keywords):
pj = to_json.JsonEncoder()
# If we have a body message provided, this request must be json
if body_param_name:
request_content_type = self.request.content_type
if request_content_type is not None:
request_content_type = request_content_type.lower().split(";")[0]
if request_content_type != "application/json" and not lenient:
raise exceptions.HTTPUnsupportedMediaType("Content type must be 'application/json'")
try:
m = pj.decode_message(body_param_type, self.request.body)
keywords[body_param_name] = m
except (ValueError, messages.Error) as error:
raise exceptions.HTTPBadRequest(error.message or "Request body JSON is invalid.")
try:
# Everything is good. Call the actual handler method
result = f(self, *arguments, **keywords)
response_code = None
headers = {}
except Exception as e:
result = message_types.error_message_from_exception(e)
headers = {}
response_code = 500
if hasattr(e, 'code'):
response_code = e.code
# Log only errors
if response_code < 200 or response_code > 404:
logging.exception(e)
if type(result) != tuple:
result = (result,)
for val in result:
if type(val) == int:
response_code = val
elif type(val) == dict:
headers.update(val)
elif isinstance(val, messages.Message):
response_code = response_code or 200
self.response.content_type = 'application/json'
self.response.write(pj.encode_message(val))
if response_code:
self.response.status_int = response_code
for k, v in headers.items():
self.response.headers[k] = v
return wrapper
if _wrapped_function is not None and hasattr(_wrapped_function, '__call__'):
return get_wrapper(body_param_name=body_param_name, body_param_type=body_param_type, lenient=lenient, f=_wrapped_function)
else:
return util.curry(get_wrapper, body_param_name, body_param_type, lenient) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def monorail_api_method(\n request_message, response_message, **kwargs):\n time_fn = kwargs.pop('time_fn', time.time)\n method_name = kwargs.get('name', '')\n method_path = kwargs.get('path', '')\n http_method = kwargs.get('http_method', '')\n def new_decorator(func):\n @endpoints.method(request_message, response_message, **kwargs)\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n method_identifier = (ENDPOINTS_API_NAME + '.' +\n (method_name or func.__name__)\n + '/' + (method_path or func.__name__))\n start_time = time_fn()\n approximate_http_status = 200\n request = args[0]\n ret = None\n c_id = None\n c_email = None\n mar = None\n try:\n if settings.read_only and http_method.lower() != 'get':\n raise permissions.PermissionException(\n 'This request is not allowed in read-only mode')\n requester = endpoints.get_current_user()\n logging.info('requester is %r', requester)\n logging.info('args is %r', args)\n logging.info('kwargs is %r', kwargs)\n auth_client_ids, auth_emails = (\n client_config_svc.GetClientConfigSvc().GetClientIDEmails())\n if settings.local_mode:\n auth_client_ids.append(endpoints.API_EXPLORER_CLIENT_ID)\n if self._services is None:\n self._set_services(service_manager.set_up_services())\n cnxn = sql.MonorailConnection()\n c_id, c_email = api_base_checks(\n request, requester, self._services, cnxn,\n auth_client_ids, auth_emails)\n mar = self.mar_factory(request, cnxn)\n self.ratelimiter.CheckStart(c_id, c_email, start_time)\n self.increment_request_limit(request, c_id, c_email)\n ret = func(self, mar, *args, **kwargs)\n except exceptions.NoSuchUserException as e:\n approximate_http_status = 404\n raise endpoints.NotFoundException(\n 'The user does not exist: %s' % str(e))\n except (exceptions.NoSuchProjectException,\n exceptions.NoSuchIssueException,\n exceptions.NoSuchComponentException) as e:\n approximate_http_status = 404\n raise endpoints.NotFoundException(str(e))\n except (permissions.BannedUserException,\n permissions.PermissionException) as e:\n approximate_http_status = 403\n logging.info('Whitelist ID %r email %r', auth_client_ids, auth_emails)\n raise endpoints.ForbiddenException(str(e))\n except endpoints.BadRequestException:\n approximate_http_status = 400\n raise\n except endpoints.UnauthorizedException:\n approximate_http_status = 401\n # Client will refresh token and retry.\n raise\n except oauth.InvalidOAuthTokenError:\n approximate_http_status = 401\n # Client will refresh token and retry.\n raise endpoints.UnauthorizedException(\n 'Auth error: InvalidOAuthTokenError')\n except (exceptions.GroupExistsException,\n exceptions.InvalidComponentNameException,\n ratelimiter.ApiRateLimitExceeded) as e:\n approximate_http_status = 400\n raise endpoints.BadRequestException(str(e))\n except Exception as e:\n approximate_http_status = 500\n logging.exception('Unexpected error in monorail API')\n raise\n finally:\n if mar:\n mar.CleanUp()\n now = time_fn()\n elapsed_ms = int((now - start_time) * 1000)\n if c_id and c_email:\n self.ratelimiter.CheckEnd(c_id, c_email, now, start_time)\n\n fields = {\n # Endpoints APIs don't return the full set of http status values.\n 'status': approximate_http_status,\n # Use the api name, not the request path, to prevent an\n # explosion in possible field values.\n 'name': method_identifier,\n 'is_robot': False,\n }\n\n http_metrics.server_durations.add(\n elapsed_ms, fields=fields)\n http_metrics.server_response_status.increment(\n fields=fields)\n http_metrics.server_request_bytes.add(\n 
len(protojson.encode_message(request)), fields=fields)\n response_size = 0\n if ret:\n response_size = len(protojson.encode_message(ret))\n http_metrics.server_response_bytes.add(\n response_size, fields=fields)\n\n return ret\n\n return wrapper\n return new_decorator",
"def direct(func):\n def wrapper(*args, **kwargs):\n response = func(*args, **kwargs)\n raise DirectResponseException(response)\n return wrapper",
"def decorate_HTTP_verb_method(method):\n @functools.wraps(method)\n def wrapper(self, RIC_base_uri, **kwargs):\n partition = kwargs.pop('partition', '')\n name = kwargs.pop('name', '')\n sub_path = kwargs.pop('subPath', '')\n suffix = kwargs.pop('suffix', '')\n uri_as_parts = kwargs.pop('uri_as_parts', False)\n if uri_as_parts:\n REST_uri = generate_bigip_uri(RIC_base_uri, partition, name,\n sub_path, suffix, **kwargs)\n else:\n REST_uri = RIC_base_uri\n pre_message = \"%s WITH uri: %s AND suffix: %s AND kwargs: %s\" %\\\n (method.__name__, REST_uri, suffix, kwargs)\n logging.debug(pre_message)\n response = method(self, REST_uri, **kwargs)\n post_message =\\\n \"RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:\"\\\n \" %s\\nText: %r\" % (response.status_code,\n response.headers.get('Content-Type', None),\n response.headers.get('Content-Encoding', None),\n response.text)\n logging.debug(post_message)\n if response.status_code not in range(200, 207):\n error_message = '%s Unexpected Error: %s for uri: %s\\nText: %r' %\\\n (response.status_code,\n response.reason,\n response.url,\n response.text)\n raise iControlUnexpectedHTTPError(error_message, response=response)\n return response\n return wrapper",
"def expose_proxy_http():\n def proxy_http_wrap(func):\n func.expose = True\n func._pecan = {\n 'content_types': {'text/xml': 'wsmexml:',\n 'application/json': 'wsmejson:',\n 'application/xml': 'wsmexml:'},\n 'argspec': inspect.getargspec(func),\n 'template': ['wsmexml:', 'wsmexml:', 'wsmejson:'],\n 'content_type': 'application/json'\n }\n\n def proxy_http_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n functools.update_wrapper(proxy_http_wrapper, func)\n return proxy_http_wrapper\n return proxy_http_wrap",
"def _wrap_send(func, instance, args, kwargs):\n # TODO[manu]: we already offer a way to provide the Global Tracer\n # and is ddtrace.tracer; it's used only inside our tests and can\n # be easily changed by providing a TracingTestCase that sets common\n # tracing functionalities.\n tracer = getattr(instance, \"datadog_tracer\", ddtrace.tracer)\n\n # skip if tracing is not enabled\n if not tracer.enabled:\n return func(*args, **kwargs)\n\n request = kwargs.get(\"request\") or args[0]\n if not request:\n return func(*args, **kwargs)\n\n # sanitize url of query\n parsed_uri = parse.urlparse(request.url)\n hostname = parsed_uri.hostname\n if parsed_uri.port:\n hostname = \"{}:{}\".format(hostname, parsed_uri.port)\n sanitized_url = parse.urlunparse(\n (\n parsed_uri.scheme,\n parsed_uri.netloc,\n parsed_uri.path,\n parsed_uri.params,\n None, # drop parsed_uri.query\n parsed_uri.fragment,\n )\n )\n\n with tracer.trace(\"requests.request\", span_type=SpanTypes.HTTP) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n # update the span service name before doing any action\n span.service = _extract_service_name(instance, span, hostname=hostname)\n\n # Configure trace search sample rate\n # DEV: analytics enabled on per-session basis\n cfg = config.get_from(instance)\n analytics_enabled = cfg.get(\"analytics_enabled\")\n if analytics_enabled:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, cfg.get(\"analytics_sample_rate\", True))\n\n # propagate distributed tracing headers\n if cfg.get(\"distributed_tracing\"):\n propagator = HTTPPropagator()\n propagator.inject(span.context, request.headers)\n\n # Storing request headers in the span\n store_request_headers(request.headers, span, config.requests)\n\n response = None\n try:\n response = func(*args, **kwargs)\n\n # Storing response headers in the span. Note that response.headers is not a dict, but an iterable\n # requests custom structure, that we convert to a dict\n if hasattr(response, \"headers\"):\n store_response_headers(dict(response.headers), span, config.requests)\n return response\n finally:\n try:\n span.set_tag(http.METHOD, request.method.upper())\n span.set_tag(http.URL, sanitized_url)\n if config.requests.trace_query_string:\n span.set_tag(http.QUERY_STRING, parsed_uri.query)\n if response is not None:\n span.set_tag(http.STATUS_CODE, response.status_code)\n # `span.error` must be an integer\n span.error = int(500 <= response.status_code)\n # Storing response headers in the span.\n # Note that response.headers is not a dict, but an iterable\n # requests custom structure, that we convert to a dict\n response_headers = dict(getattr(response, \"headers\", {}))\n store_response_headers(response_headers, span, config.requests)\n except Exception:\n log.debug(\"requests: error adding tags\", exc_info=True)",
"def req_as_decorator(req_output, *args, **kwargs):\r\n return req_output(dummy_func)(*args, **kwargs)",
"def passthrough(f):\r\n def wrapper(response):\r\n f()\r\n return response\r\n\r\n return wrapper",
"def decorator(func):\n def wrapper(resource, request, ** kwargs):\n \"\"\" wraps the method with common api response's routines, like\n checking if it's authenticated or packing the response in an api\n friendly way\n\n \"\"\"\n # ckech if everything is ok, before proceding\n resource.method_check(request, allowed=expected_methods)\n resource.is_authenticated(request)\n resource.throttle_check(request)\n\n # call the decorated method\n result = func(resource, request, **kwargs)\n\n # if a single response is expected\n if single:\n if returns_extra_data:\n objt = result[0]\n else:\n objt = result\n bundle = resource.build_bundle(obj=objt, request=request)\n to_be_serialized = resource.full_dehydrate(bundle)\n if returns_extra_data:\n to_be_serialized.data.update(result[1])\n else: # if we are expecting an array of objects\n # we need to paginante\n paginator = resource._meta.paginator_class(\n request.GET,\n result,\n resource_uri=resource.get_resource_uri(),\n limit=resource._meta.limit,\n max_limit=resource._meta.max_limit,\n collection_name=resource._meta.collection_name)\n\n to_be_serialized = paginator.page()\n\n bundles = [resource.build_bundle(obj=obj, request=request)\n for obj in to_be_serialized['objects']]\n\n to_be_serialized['objects'] = [resource.full_dehydrate(bnd)\n for bnd in bundles]\n\n resource.log_throttled_access(request)\n return resource.create_response(request, to_be_serialized)\n return wrapper",
"def response_transform_decorator(original_func):\n def response_transformer_wrapper(*args, **kwargs):\n \"\"\"\n Log errors and apply transformation in response_handler_func\n \"\"\"\n try:\n response = original_func(*args, **kwargs)\n response.raise_for_status()\n\n except requests.exceptions.HTTPError:\n help_string = ('Please consult the Coursera Data '\n 'Exports Guide for further assistance: '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n if (response.status_code == 403):\n help_string = ('Please authorize this application '\n 'by running:\\n'\n '\\t$ courseraoauth2client config authorize --app manage_research_exports\\n' # noqa\n 'See https://github.com/coursera/courseraoauth2client ' # noqa\n 'for more information on authorization.\\n'\n 'For further assistance, consult the '\n 'Coursera Data Exports Guide '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n logging.error(\n 'Request to {url} with body:\\n\\t{body}\\nreceived response'\n ':\\n\\t{text}\\n'\n '{help_string}\\n'\n .format(url=response.url,\n text=response.text,\n body=(response.request and response.request.body),\n help_string=help_string))\n raise\n\n return response_transformer(response)\n return response_transformer_wrapper",
"def apimethod(func):\n def wrapper(self, *args, **kwargs):\n # Debug\n class_name = self.__class__.__name__\n method_name = func.__name__.upper()\n logger.info(\"[Class: %s] %s request\" % (class_name, method_name))\n\n # Call the parse method\n self.apply_parameters()\n self.parse()\n # Call the wrapped function\n try:\n out = func(self, *args, **kwargs)\n except KeyError as e:\n error = str(e).strip(\"'\")\n logger.critical(\"Key error: %s\" % error)\n if error == \"security\":\n return {'message': \"FAIL: problems with auth check\"}, \\\n hcodes.HTTP_BAD_NOTFOUND\n raise e\n except TypeError as e:\n logger.warning(e)\n error = str(e).strip(\"'\")\n logger.critical(\"Type error: %s\" % error)\n if \"required positional argument\" in error:\n return {'message': \"FAIL: missing argument\"}, \\\n hcodes.HTTP_BAD_REQUEST\n raise e\n\n # DO NOT INTERCEPT 404 or status from other plugins (e.g. security)\n if isinstance(out, Response):\n return out\n\n # BASE STATUS?\n status = hcodes.HTTP_OK_BASIC\n\n # VERY IMPORTANT\n # DO NOT INTERFERE when\n # at some level we already provided the couple out/response\n if isinstance(out, tuple) and len(out) == 2:\n subout, status = out\n out = subout\n\n # Set standards for my response as specified in base.py\n #return marshal(out, self.resource_fields), status\n return out, status\n\n return wrapper",
"def api_decorator(name, fn):\n def wrapped_func(*args, **kwarg):\n body = {}\n body['args'] = []\n body['kwarg'] = {}\n original_args = args\n if len(args) >= 2:\n #body['self'] = args[0]\n body['context'] = args[1]\n args = args[3:]\n for arg in args[3:]:\n body['args'].append(arg)\n for key in kwarg:\n body['kwarg'][key] = kwarg[key]\n api.notify(FLAGS.default_publisher_id,\n name,\n FLAGS.default_notification_level,\n body)\n ret = None\n try:\n ret = fn(*original_args, **kwarg)\n except Error as e:\n body['error'] = \"%s\" % e\n api.notify(FLAGS.default_publisher_id,\n name,\n 'ERROR',\n body)\n raise e\n return ret\n return wrapped_func",
"def inner(func):\r\n\r\n service = func.__qualname__.split(\".\")[0]\r\n _Router().add_route(\r\n service=service,\r\n grpc_method=func.__name__,\r\n url_path=url,\r\n http_method=method\r\n )\r\n if pre_request is not None and len(pre_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pre_request, url)\r\n if pos_request is not None and len(pos_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pos_request, url)\r\n return func",
"def wrapper(self, *args, **kwd):\n try:\n retval = function(self, *args, **kwd)\n except (ValueError, AttributeError), log:\n LOG('SlapTool', INFO, 'Converting ValueError to NotFound, real error:',\n error=True)\n raise NotFound(log)\n except SoftwareInstanceNotReady, log:\n self.REQUEST.response.setStatus(408)\n self.REQUEST.response.setHeader('Cache-Control', 'private')\n return self.REQUEST.response\n except ValidationFailed:\n LOG('SlapTool', INFO, 'Converting ValidationFailed to ValidationFailed,'\\\n ' real error:',\n error=True)\n raise ValidationFailed\n except Unauthorized:\n LOG('SlapTool', INFO, 'Converting Unauthorized to Unauthorized,'\\\n ' real error:',\n error=True)\n raise Unauthorized\n\n self.REQUEST.response.setHeader('Content-Type', 'text/xml; charset=utf-8')\n return '%s' % retval",
"def dont_apply_response_fixes(original_function, self, request, response):\n return response",
"def response_handling(self) -> global___Snippet.SimpleResponseHandling:",
"def response_handling(self) -> global___Snippet.SimpleResponseHandling:",
"def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n else:\n return self.pa.request(**request_args)\n return decorator",
"def greeting_message_decorator(f):\n\n def wrap(*args, **kwargs):\n \"\"\" Here we can print *aregs and **kwargs and manipulate them \"\"\"\n\n print('>> Decorate before executing \"greeting_message\" function')\n func = f(*args, **kwargs)\n print('>> Decorate after executing \"greeting_message\" function')\n return func\n return wrap",
"def __call__(self, environ, start_response):\n start_response(self.status, self.headers)\n return [self.message] if not isinstance(self.message, list) else self.message",
"def http_response(code):\n def decorator(func):\n def wrapper(*args, **kwargs):\n def _http_response(response, http_status_code):\n \"\"\"\n Returns an API response for the client.\n\n Args:\n response (list/dict/serializable object): api response for the client.\n http_status_code (int): the http status code that the server should return.\n\n Returns:\n Response: a flask response object.\n \"\"\"\n return make_response(jsonify(response), http_status_code)\n try:\n response = func(*args, **kwargs)\n return _http_response(\n response=response if code != HttpCodes.NO_CONTENT else \"\", http_status_code=code\n )\n except BaseApiException as exc:\n return _http_response(response=exc.to_dict(), http_status_code=exc.status_code)\n return wrapper\n return decorator",
"def wrap(*args, **kwargs):\n\n print('>> Decorate before executing \"greeting_message\" function')\n func = f(*args, **kwargs)\n print('>> Decorate after executing \"greeting_message\" function')\n return func",
"def web_service_response_example(self, action, controller):",
"def func_wrapper(event, context):\n req = Request(event, context)\n\n try:\n resp = func(req)\n\n if not isinstance(resp, Response):\n message = (\n 'Invalid return value from handler. '\n 'It should be either Response or Exception'\n )\n raise TypeError(message)\n except ServerlessError as e:\n status_code = e.status_code\n message = e.message if e.message else e.__class__.__name__\n\n resp = to_error_response(message, e.errors, status_code)\n except Exception as e: # pylint: disable=W0703\n logger.exception(e)\n status_code = 500\n message = 'InternalServerError'\n errors = tuple()\n\n resp = to_error_response(message, errors, status_code)\n return resp.to_lambda_output()",
"def Echo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def decorator(fn):\n @functools.wraps(fn)\n def result(*args, **kwargs):\n request_time = datetime.datetime.now()\n actual_response = fn(*args, **kwargs)\n request = bottle.request\n response = bottle.response\n # modify this to log exactly what you need:\n logger.info('%s %s %s %s %s', request.remote_addr,\n request_time,\n request.method,\n request.url,\n response.status)\n logger.info('Cookies: %s', request.get_cookie('login'))\n logger.info('Handeled by: \"%s\" in file: \"%s\"', fn.__name__, SCRIPT_NAME)\n\n return actual_response\n return result",
"def handle_request(fun):\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n We raise an exception when\n the code on the client side fails\n Server side errors are taken care of\n through response codes\n \"\"\"\n try:\n return fun(self, *args, **kwargs)\n except Exception as req_exception:\n self.logger.exception(\"internal error\")\n raise ClientSideError(str(req_exception))\n\n return wrapper",
"def send(self, request: Request, **requests_kwargs) -> Response:",
"def ProtoRPCServiceMethod(method):\n\n def wrapper(self, request):\n assert isinstance(request, wrapper.rpc_method_spec.request_type)\n logging.info(\"Request:\\n%s\", request)\n response = method(self, request)\n assert isinstance(response, wrapper.rpc_method_spec.response_type)\n logging.info(\"Response:\\n%s\", response)\n return response\n\n # Since the service's descriptor will be parsed when the class is created,\n # which is later than the invocation time of this decorator, here it just\n # place the placeholder with dummy contents.\n wrapper.rpc_method_spec = _ProtoRPCServiceMethodSpec(None, None)\n return wrapper",
"def proxy_method(self, rest_path, sign, kwargs):",
"def _respondTo(self, endpoint, data):\n self.testServer.respondWith(endpoint, lambda request: self._respond(request, data))"
]
| [
"0.6401912",
"0.60603726",
"0.59293485",
"0.58852273",
"0.586914",
"0.58472216",
"0.5837364",
"0.58304596",
"0.57779384",
"0.57330793",
"0.5651764",
"0.5620239",
"0.56154704",
"0.56032073",
"0.5593258",
"0.5593258",
"0.55751294",
"0.5568334",
"0.55397886",
"0.55388176",
"0.5536625",
"0.55136603",
"0.5508641",
"0.5490952",
"0.5490268",
"0.5485441",
"0.54805875",
"0.54720235",
"0.54475844",
"0.54091376"
]
| 0.64499915 | 0 |
Given the argument PATTERN from the argument parser, this function decides whether it is a single pattern or a file. If it is a file, each line is a search pattern. | def extract_pattern(self, patterns):
# if we have more patterns or
# a single one which is not a file:
if len(patterns) > 1 or (
len(patterns) == 1 and not os.path.isfile(patterns[0])):
return patterns
else:
pattern = patterns[0]
pat_list = []
# if PATTERN is a file, extract all patterns
if os.path.isfile(pattern):
try:
with open(pattern, "r", encoding="utf-8") as p_file:
for line in p_file:
pat_list.append(line.strip())
except Exception:
print("The selected PATH-file cannot be opened! "
"Please choose another one.")
sys.exit()
return pat_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findFile(PATH, PATTERN):\n\t#### find all the file located in the PATH corresponding to the PATTERN\n\tfindPattern = subprocess.Popen('find {path} -name \"{pattern}\" 2> /dev/null'.format(path=PATH, pattern=PATTERN), shell=True, stdout=subprocess.PIPE);\n\t#### String containing all the path toward the file's name matching with the pattern\n\treturn findPattern.stdout.read().decode(\"utf-8\").strip().split(\"\\n\")",
"def _resolve_arguments(patterns, packages_path, search_packages_path):\n\n def _read_patterns(path):\n try:\n with open(path, \"r\") as handler:\n return set(handler.read().splitlines())\n except IOError:\n return set()\n\n ignore_patterns = set()\n\n for item in patterns:\n if os.path.isfile(item) or os.path.isabs(item):\n # This happens in 2 scenarios\n # 1. The user-given pattern is actually a path on-disk\n # 2. The user does bash process substitution (e.g.\n # `rez-batch-process report --ignore-patterns <(cat patterns.txt)`)\n #\n ignore_patterns.update(_read_patterns(item))\n else:\n ignore_patterns.add(item)\n\n if isinstance(packages_path, six.string_types):\n packages_path = packages_path.split(os.pathsep)\n\n if isinstance(search_packages_path, six.string_types):\n search_packages_path = search_packages_path.split(os.pathsep)\n\n return ignore_patterns, packages_path, search_packages_path",
"def matching(pattern: str, kind: Optional[str] = None,\n dirpath: Optional[Union[str, Path]] = None, **options\n) -> List[Tuple[Path, Path]]:\n\n if dirpath or kind == 'askdirectory':\n\n # dialog for dir if none given\n dirpath = standard(kind, **options) if not dirpath else Path(dirpath)\n # separate file paths in dirpat by suffix\n filepaths = dirpath.glob('*.*')\n sorted_paths = defaultdict(list)\n for path in filepaths:\n sorted_paths[path.suffix].append(path)\n paths, others = list(sorted_paths.values())\n\n elif kind == 'askopenfilenames':\n\n # open two dialogs to user select files to match\n paths = standard(kind, title='Select File Set 1', **options)\n others = standard(kind, title='Select File Set 2', **options)\n\n else:\n\n msg = (\"matching dialog requires 'kind' argument to be one of '{}' \"\n \"or '{}' or a Path passed to the dirpath argument.\")\n raise TypeError(msg.format('askdirectory', 'askopenfilenames'))\n\n return re_match(paths, others, pattern)",
"def file_check(pattern, file_to_check):\n if file_to_check.name.__contains__(pattern):\n yield True",
"def MatchPattern(file_path, pattern):\n try:\n with open(file_path, \"r\") as f:\n prog = re.compile(pattern)\n for line in f:\n result = prog.match(line)\n if result:\n return result.groups()\n except IOError:\n pass\n except Exception:\n pass\n\n return None",
"def find_file(regex):\n f_list = glob.glob(regex)\n if len(f_list) == 1:\n return f_list[0]\n elif len(f_list) > 1:\n return f_list\n else:\n raise Exception(\"[ERROR] There is no valid output file for this job\")",
"def _search(matcher=None, output_formatter=None, input_file=None, options=None):\n match_found = False\n\n file_obj = None\n\n try:\n file_obj = open(input_file)\n\n matches = list(matcher.match_file(file_obj))\n\n if matches:\n match_found = True\n\n if options.count:\n if output_formatter:\n output_formatter.emit_line('%s : %s' % (input_file, str(len(matches))))\n else:\n for match in matches:\n if output_formatter:\n output_formatter.emit_matching_line(input_file, match)\n except (OSError, IOError):\n pass\n finally:\n if file_obj is not None:\n file_obj.close()\n\n return match_found",
"def find_pattern_in_file(pattern, file_name):\n pattern = re.compile(pattern)\n with open(file_name) as f:\n for line in f:\n for match in re.finditer(pattern,line):\n return match.groups()\n return None",
"def grep(pattern, *files_or_paths):\n matches = []\n\n for fop in files_or_paths:\n with fileobj(fop) as fo:\n matches.extend((line for line in fo if re.match(pattern, line)))\n\n return matches",
"def main(pattern, i=stdin, o=stdout, only_matching=False):\n sre = re.compile(pattern)\n with i as i, o as o:\n for line in i:\n match = sre.match(line)\n if match:\n if only_matching:\n o.write(\"%s\\n\" % match.groups()[0])\n else:\n o.write(line)",
"def matchPattern(category):\n settings = settingsLoader()\n categoryPattern = (settings['categoriesDictSettings']\n [category]\n ['matches']\n ['matchExpression'])\n logging.debug(\"SORT: matchPattern: using %s\" % categoryPattern)\n for EachPattern in categoryPattern:\n logging.debug(\"SORT: matchPattern: searching for %s\" % EachPattern)\n for EachFile in listOfFiles:\n logging.debug(\"SORT: matchPattern: searching for %s in %s\" %\n (EachPattern, EachFile))\n if fnmatch.fnmatchcase(EachFile, EachPattern):\n return True\n return False",
"def _parse_args_files(self, filematch):\n files, start_pos = [], 0\n while True:\n pos_a = self.cmd.find(filematch, start_pos)\n if pos_a > 0:\n pos_b = self.cmd.find(' ', pos_a)\n if pos_b > 0:\n files.append(self.cmd[pos_a:pos_b])\n else:\n files.append(self.cmd[pos_a:])\n start_pos = pos_b\n else:\n return files",
"def search_file(pattern, filename):\n if not os.path.exists(filename):\n raise Exception(\"Can't open file for reading! \" + filename)\n\n fh = open(filename, \"r\")\n for line in fh:\n allmatch = re.findall(pattern, line)\n if allmatch:\n fh.close()\n return allmatch[0]\n\n fh.close()\n return None",
"def fsearch(self,pattern,msg=None, killon=None ):\n import re\n current=0\n cpat=re.compile(pattern)\n\n for num,line in enumerate(self.f):\n if killon:\n kill = re.search(killon,line)\n if kill:\n # the kill phrase was found first, so die. \n return False\n current=re.search(cpat,line)\n if current:\n if msg:\n print msg\n break\n if not current:\n# print 'ERROR: Requested pattern ('+pattern+') not found in file: <'+self.f.name+ '>. Check file for correct structure. Exiting...'\n return False\n\n return line",
"def match_file(patterns, file):\n\tmatched = False\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tif file in pattern.match((file,)):\n\t\t\t\tmatched = pattern.include\n\treturn matched",
"def _filter_patterns(self, filepath, pattern_prefix, exclude_pattern, include_pattern):\n isfilter = False\n if exclude_pattern:\n full_exclude_pattern = os.path.join(pattern_prefix, exclude_pattern)\n if fnmatch.fnmatch(filepath, full_exclude_pattern):\n isfilter = True\n if include_pattern:\n full_include_pattern = os.path.join(pattern_prefix, include_pattern)\n if fnmatch.fnmatch(filepath, full_include_pattern):\n isfilter = False\n return isfilter",
"def fnmatch(pattern, filename) -> bool:\n return _fnmatch(filename, pattern)",
"def getType(self, file=None):\n if file is None:\n raise ValueError(\"No file name supplied\")\n\n for patterns, type in self.pattern.items():\n if patterns.search(file):\n return type\n\n return None",
"def do_file (self, line) :\n\t\targ = line.split()\n\t\tfor a in arg :\n\t\t\tif self.exists( a ) :\n\t\t\t\tprint \"%s: %s\" % ( a, self.__wd['content'][a]['type'] )",
"def parse_line(file,pattern,group=1,indiv_file=None):\n text = open_file(file,indiv_file).split(\"\\n\")\n for line in text:\n match = re.search(pattern, line)\n if match:\n matched_string = match.group(group)\n return matched_string",
"def _MatchPatternLines(self, in_stream, re_pattern, num_lines=None):\n num_read = 0\n while True:\n line = in_stream.readline()\n if not line:\n return None\n num_read += 1\n m = re_pattern.match(line)\n if m is not None:\n return m\n if num_lines is not None and num_read >= num_lines:\n return None",
"def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)",
"def match_patterns(pathname, patterns):\n for pattern in patterns:\n if fnmatch(pathname, pattern):\n return True\n return False",
"def grep_me(pattern, fname):\n for line in stream_reader(fname):\n if re.search(pattern, line, re.I):\n print('{}:{}:{}'.format(filename(), filelineno(), line), end='')",
"def find_egg_info_file(self, pattern=''):\n full_pattern = os.path.join(self.requirement.source_directory, 'pip-egg-info', '*.egg-info', pattern)\n logger.debug(\"Looking for %r file(s) using pattern %r ..\", pattern, full_pattern)\n matches = glob.glob(full_pattern)\n if len(matches) > 1:\n msg = \"Source distribution directory of %s (%s) contains multiple *.egg-info directories: %s\"\n raise Exception(msg % (self.requirement.project_name, self.requirement.version, concatenate(matches)))\n elif matches:\n logger.debug(\"Matched %s: %s.\", pluralize(len(matches), \"file\", \"files\"), concatenate(matches))\n return matches[0]\n else:\n logger.debug(\"No matching %r files found.\", pattern)",
"def scan(self, pat):\n re_pat = re.compile(pat)\n for infilename in self.file_names:\n infile = open(infilename, 'r')\n for line in infile:\n line = line.rstrip()\n mo = re_pat.search(line)\n if mo is not None:\n print '%s:%s' % (infilename, line, )",
"def validate_string_findall(pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.findall(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out",
"def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False",
"def file_entry(file_path, regex, default=()):\n with open(file_path, 'r') as f:\n for line in f:\n search = re.search(regex, line, re.IGNORECASE)\n if search:\n return search.groups()\n\n return default",
"def grep(filename, pattern, verbose=False):\n with open(filename, \"r\") as file:\n for line in file:\n if re.search(pattern, line):\n if verbose:\n return line\n else:\n return True"
]
| [
"0.6204803",
"0.60617113",
"0.5994919",
"0.57386744",
"0.56577635",
"0.5650249",
"0.5618319",
"0.5582942",
"0.5576439",
"0.5539853",
"0.55385",
"0.5527214",
"0.5495445",
"0.549276",
"0.54618526",
"0.54250616",
"0.54236466",
"0.54076785",
"0.5393178",
"0.53860563",
"0.53833365",
"0.5375606",
"0.533824",
"0.5327334",
"0.52859443",
"0.52744025",
"0.52634543",
"0.52379453",
"0.5215235",
"0.52109957"
]
| 0.6360427 | 0 |
Remove the empty string from the patterns; if no patterns are left, warn the user and stop the program. There is no need to validate the TEXT parameter, as an empty string cannot contain any other pattern. | def validate_data(self):
for pattern in self.patterns:
if pattern == "":
self.patterns.remove("")
if not self.patterns:
print("WARNING! Missing pattern or empty string!")
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_mask_when_empty(self, text):\n if text in ['()-', '.-', '..-']:\n return ''\n else:\n return text",
"def clean_text(self, text) -> Union[str, None]:\n if text and ''.join(text.split()):\n if type(text) == bytes: #Decoding byte strings\n text = text.decode('utf-8')\n #Removing emails + ***.com urls\n text = ' '.join([item for item in text.split() if '@' not in item and '.com' not in item])\n text = ' '.join(text.split()) #removing all multiple spaces\n if text: return text\n # UNCLEAN_TEXT.inc()\n return None",
"def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result",
"def validate_empty(self):\n result = \"\"\n if(not re.search(\"[a-zA-Z0-9]\", self.title) or not\n re.search(\"^(\\s|\\S)*(\\S)+(\\s|\\S)*$\", self.body)):\n result = \"INCORRECT INPUT, YOU CAN'T SUBMIT EMPTY FIELD OR FIRST CHARACTER SHOULD BE ALPHA NUMERIC\"\n else:\n result = True\n return result",
"def sanitize_text(text: str) -> str:\n for r in [RE_NOISE, RE_EMAIL, RE_REFERENCE]:\n text = r.sub(\"\", text)\n return text",
"def clean_resume(self, text):\n text = text.lower() # lowercase capital letters\n\n text = re.sub(r'(http|www)\\S+\\s*', '', text) # remove URLs\n text = re.sub(r'\\S+@\\S+\\s*', '', text) # remove emails\n text = re.sub(r'@\\S+\\s*', '', text) # remove mentions\n text = re.sub(r'#\\S+\\s*', '', text) # remove hashtags\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n # text = re.sub('[%s]' % re.escape(\"\"\"!\"#$%&'()*+,-./:;<=>?@[]^_`{|}~\"\"\"), '', text) # remove punctuation\n # text = re.sub(r'[^\\x00-\\x7f]', '', text) # remove non-ASCII characters\n # # # Replace non-ASCII characters with their most alike representation (doesn't work):\n # # text = unidecode(unicode(text, encoding=\"utf-8\"))\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text",
"def filter_out_data(text):\n if len(text) < 30:\n return None\n return text",
"def remove_pattern(input_txt,pattern):\r\n r = re.findall(pattern,input_txt)\r\n\r\n for i in r:\r\n input_txt = re.sub(i,'',input_txt)\r\n return input_txt",
"def clean(mystring, mypatterns):\n\treturnstring = mystring\n\tfor pattern in mypatterns:\n\t\treturnstring = pattern.sub(r'', returnstring)\n\treturnstring = re.sub('\\n\\n', '', returnstring)\n\treturnstring = re.sub('\\n', ' ', returnstring)\n\treturn returnstring",
"def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))",
"def _clean(matches):\n # type: (List[str]) -> None\n while True:\n try:\n matches.remove(\"\")\n except ValueError:\n break\n\n while True:\n try:\n matches.remove(\",\")\n except ValueError:\n return",
"def clean_text(string):\n remove = ['\\n','\\r','\\s','\\\\']\n if not pd.isna(string):\n for char in remove:\n string = string.replace(char, ' ')\n return string\n else:\n return \"Missing\"",
"def _text_clean(self):\n try:\n self.text = eval(self.text[0])[0]['node']['text']\n self.clean = True\n except IndexError:\n return",
"def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")",
"def onTextChange(self, event):\n\n self.validateRegexFields(complete=False)\n event.Skip()",
"def clean(self, value):\n value = super().clean(value)\n if value in self.empty_values:\n return value\n return value.replace(' ', '')",
"def clean_text(self, text: str) -> str:\n url_regex = r\"https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,4}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\"\n\n text = text.strip(\" _\\t\\n\")\n text = text.split(\"____\")[0] # To remove footnotes\n text = text.strip(\" _\\t\\n\")\n text = re.sub(url_regex, \"<url>\", text) # To remove URLs\n text = re.sub(r\">.*(?!(\\n+))$\", \"\",\n text) # To remove quotes at last.\n text = re.sub(r\">(.*)\\n\", \"<startq> \\g<1> <endq>\",\n text) # To add start quote, end quote tags\n text = re.sub(r\"\\n\", \" \", text)\n text = text.rstrip(\" _\\n\\t\")\n text = re.sub(r\"\\n\", \" \", text)\n text = re.sub(r\"\\r\", \" \", text)\n text = text.lower()\n if self.mask_dms:\n text = self.mask_disc_markers(text)\n return text",
"def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext",
"def toClean(self, *patterns):\n self.cleanables.extend([*patterns])",
"def filter_blanks(user, str):\n if user.is_staff:\n return str\n return re.sub(r'\\n{2}\\n+', '\\n', str)",
"def comment_cleaner(text):\n text = re.sub(\"[^\\w\\s]\", \"\", text)\n text = \" \".join([x.lower() for x in text.split(' ') if x.lower() in corpus and x.lower() not in stopwords and len(x) > 1])\n if text == '':\n return np.nan\n return text",
"def filter_blanks(user, str):\n return re.sub(r'\\n{2}\\n+', '\\n', str)",
"def test_empty():\n\n result = SECOND_TASK.start(\"\")\n\n assert result.replace(\"*\", \"\").replace(\"_\", \"\").replace(\"#\", \"\").replace(\"^\", \"\") == \"\"",
"def clean_all(text):\n # anticipate Null values in columns that will be cleaned\n if text is not None and type(text) is not float:\n text = \"\".join(text)\n no_ucode = clean_unicode(text)\n no_space = \"\".join(clean_whitespaces(no_ucode.strip()))\n text = no_space.strip()\n\n return text",
"def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))",
"def cleanText_letters(text):\n text = re.sub(r\"emoji_(\\w+)\", r\" \", text)\n text = re.sub(r\"hashtag_(\\w+)\", r\" \", text)\n text = re.sub(r\"specialmentioned\", r\" \", text)\n text = re.sub(r\"specialurl\", r\" \", text)\n text = re.sub(\"\\s+\", \" \", text).lower().strip() \n\n if text == \" \" or text == \"\":\n return \"blank_comment\"\n else:\n return text \n \n return text",
"def clean_content(self) -> str:",
"def clean(self, text, **kwargs):\n text = sanitize_text(text)\n if text is not None:\n return self.clean_text(text, **kwargs)",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UCxxxAGwsnyrHBNzzzD-D\", check=False).strip_bad(),\n \"UCAGWSNYRHBND-D\",\n )\n self.assertEqual(self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad(), \"\")\n self.assertEqual(\n self.RNA(\"aaaxggg---!ccc\", check=False).strip_bad(), \"AAAGGG---CCC\"\n )",
"def clean_description(self):\n description = self.cleaned_data['description']\n if not re.match(r'[\\w{4}\\s*]+', description) or len(description) < 10:\n v_err('no_desc')\n return description"
]
| [
"0.6168507",
"0.5996358",
"0.59933984",
"0.5874263",
"0.5803689",
"0.57598734",
"0.57212836",
"0.5720643",
"0.56914717",
"0.5681065",
"0.5645377",
"0.56068885",
"0.5581656",
"0.5568604",
"0.55501294",
"0.55471134",
"0.5540058",
"0.55348915",
"0.55294496",
"0.54810286",
"0.54774636",
"0.5472218",
"0.547196",
"0.54689187",
"0.54684246",
"0.54508924",
"0.5427497",
"0.54243535",
"0.54034233",
"0.53878236"
]
| 0.69046766 | 0 |
This function prints (or saves as JSON) the output from the matcher based on some parameters (e.g. self.recursive, type of input). It prints additional information such as the TEXT string or the name of the file and then calls the function self.results to format the rest of the output. | def output(self, argument):
if not self.json:
if not self.first_print:
print()
self.first_print = False
if isinstance(argument, tuple):
filepath, filename = argument
if not self.json:
# if -r, print the path AND the name of the file
# if not only filename, path is given by user
if filename:
to_print = filename
if self.recursive:
to_print = filepath
print(f"- {to_print}")
print(self.results)
else:
# always use complete path as key, otherwise path lost
# once the file is saved
self.json_results[str(filepath)] = self.__results
# string
else:
if not self.json:
# if multiple TEXTS, print TEXT
if len(self.input) > 1:
print(f"- {argument}")
print(self.results)
else:
self.json_results[argument] = self.__results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print(self, failed_tests=True, passed_tests=True, json_format=False):\n # TODO: Do we need the json option?\n if json_format:\n passed = self.passed\n if (passed and passed_tests) or (not passed and failed_tests):\n print(json.dumps(self, indent=4, cls=MyEncoder))\n else:\n separator = \"\"\n\n filtered_output = \"\"\n passed = self.passed\n if passed and passed_tests:\n filtered_output += \"Parser Name = {}\\n\".format(self.parser)\n if self.input_file_path and self.input_file_path != \"N/A\":\n filtered_output += \"Input Filename = {}\\n\".format(self.input_file_path)\n filtered_output += \"Tests Passed = {}\\n\".format(self.passed)\n elif not passed and failed_tests:\n filtered_output += \"Parser Name = {}\\n\".format(self.parser)\n if self.input_file_path and self.input_file_path != \"N/A\":\n filtered_output += \"Input Filename = {}\\n\".format(self.input_file_path)\n filtered_output += \"Tests Passed = {}\\n\".format(self.passed)\n filtered_output += \"Errors = {}\".format(\"\\n\" if self.errors else \"None\\n\")\n if self.errors:\n for entry in self.errors:\n filtered_output += \"\\t{0}\\n\".format(entry)\n filtered_output += \"Debug Logs = {}\".format(\"\\n\" if self.debug else \"None\\n\")\n if self.debug:\n for entry in self.debug:\n filtered_output += \"\\t{0}\\n\".format(entry)\n if self.results:\n filtered_output += \"Results =\\n\"\n for result in self.results:\n if not result.passed:\n filtered_output += \"{0}\\n\".format(result)\n\n if filtered_output:\n filtered_output += \"{0}\\n\".format(separator)\n print(filtered_output.encode(\"ascii\", \"backslashreplace\").decode())",
"def results(self):\n to_print = \"\"\n\n # determine longest key for pretty print\n limit = 0\n for key in self.__results:\n if len(key) > limit:\n limit = len(key)\n\n # decide spacing and ending between results based on type of input\n spacing = \"\"\n if len(self.input) > 1:\n spacing = \"\\t\"\n\n for key in self.__results:\n key_name = key\n if len(self.patterns) < 2:\n key_name = \"\"\n\n if self.counter:\n matches = self.__results[key]\n else:\n matches = \", \".join([str(i) for i in self.__results[key]])\n\n if len(self.patterns) > 1:\n to_print += (f'{spacing}{key_name:<{limit+2}}{matches}\\n')\n else:\n to_print += (f'{spacing}{matches}\\n')\n\n # remove last newline\n to_print = to_print[:-1]\n\n return to_print",
"def print_results(self):\n pass",
"def output_found(text, data=None):\n if conf.eval_output:\n info_dict = {'type':'found', 'text' : text}\n info_dict.update(data or {})\n output_result_eval(info_dict)\n else:\n output_result('[FOUND] ' + text)",
"def process_output(self, data):\n\n if self.interactive_result_stdout_writing:\n self.brief_logger.debug(data)\n if self.verbose_logger:\n self.verbose_logger.info(data)\n\n # f.write(data)\n # show results instantly in log file\n # f.flush()\n\n return data\n\n # TODO: #68: compile re for better performance\n # TODO: RENAME",
"def print_res(self, result, index=None):\n if index is not None:\n print(str(index).rjust(3)+ \" \" + _c.bold + _c.blue + result[\"title\"] + _c.reset)\n if result[\"description\"]:\n print(\" \"*4 + \"Description:\\t\", result[\"description\"])\n print(\n \" \"*4 +\n result[\"highlight\"].replace(\"<highlight>\", _c.blue).replace(\"</highlight>\", _c.reset),\n )\n print(\" \"*4 + \"Path: \", result[\"path\"])\n else:\n print(\"Title:\\t\\t\", result[\"title\"])\n if result[\"description\"]:\n print(\"Description:\\t\", result[\"description\"])\n print(result[\"highlight\"])\n print(\"Path: \", result[\"path\"])",
"def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)",
"def print_results(self, out_file):\n extra_results = [\n # Total test methods processed, excluding reruns.\n [\"Test Methods\", len(self.result_events)],\n [\"Reruns\", self.test_method_rerun_count]]\n\n # Output each of the test result entries.\n categories = [\n # result id, printed name, print matching tests?, detail label\n [EventBuilder.STATUS_SUCCESS,\n \"Success\", False, None],\n [EventBuilder.STATUS_EXPECTED_FAILURE,\n \"Expected Failure\", False, None],\n [EventBuilder.STATUS_FAILURE,\n \"Failure\", True, \"FAIL\"],\n [EventBuilder.STATUS_ERROR,\n \"Error\", True, \"ERROR\"],\n [EventBuilder.STATUS_EXCEPTIONAL_EXIT,\n \"Exceptional Exit\", True, \"ERROR\"],\n [EventBuilder.STATUS_UNEXPECTED_SUCCESS,\n \"Unexpected Success\", True, \"UNEXPECTED SUCCESS\"],\n [EventBuilder.STATUS_SKIP, \"Skip\", False, None],\n [EventBuilder.STATUS_TIMEOUT,\n \"Timeout\", True, \"TIMEOUT\"],\n [EventBuilder.STATUS_EXPECTED_TIMEOUT,\n # Intentionally using the unusual hyphenation in TIME-OUT to\n # prevent buildbots from thinking it is an issue when scanning\n # for TIMEOUT.\n \"Expected Timeout\", True, \"EXPECTED TIME-OUT\"]\n ]\n\n # Partition all the events by test result status\n result_events_by_status = self._partition_results_by_status(\n categories)\n\n # Print the details\n have_details = self._has_printable_details(\n categories, result_events_by_status)\n if have_details:\n self._print_banner(out_file, \"Issue Details\")\n for category in categories:\n self._report_category_details(\n out_file, category, result_events_by_status)\n\n # Print the summary\n self._print_summary_counts(\n out_file, categories, result_events_by_status, extra_results)\n\n if self.options.dump_results:\n # Debug dump of the key/result info for all categories.\n self._print_banner(out_file, \"Results Dump\")\n for status, events_by_key in result_events_by_status.items():\n out_file.write(\"\\nSTATUS: {}\\n\".format(status))\n for key, event in events_by_key:\n out_file.write(\"key: {}\\n\".format(key))\n out_file.write(\"event: {}\\n\".format(event))",
"def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)",
"def tree_analysisOutput(self, *args, **kwargs):\n fn_outputcallback = None\n for k, v in kwargs.items():\n if k == 'outputcallback': fn_outputcallback = v\n index = 1\n total = len(self.d_inputTree.keys())\n for path, d_analysis in self.d_outputTree.items():\n self.simpleProgress_show(index, total)\n self.dp.qprint(\"Processing analysis results in output: %s\" % path)\n d_output = fn_outputcallback((path, d_analysis), **kwargs)\n return {\n 'status': True\n }",
"def output_matches(self) -> List[str]:\n output = list()\n for match in sorted(self.matches):\n line = f\"{match[0]} - {match[1]}: Matching ({match[4]}) {match[2]} to {match[3]} on map\"\n logger.info(line)\n output.append(line)\n return output",
"def provide_output():\n args = parser.parse_args()\n #convert args to a dictionary\n args_dict = {arg: value for arg, value in vars(args).items() if value \n is not None} \n #store method into a variable\n method = args_dict.pop('method')\n def perform_operation():\n \"\"\"Function to perform all operations requested by the user\"\"\"\n for k in ['en', 'de', 'fr']:\n inst = named_entity_methods_text(k, method)\n if 'list_all' in args_dict:\n inst.save_all_ne_as_list_to_txt()\n if 'list_different' in args_dict:\n inst.save_different_ne_as_list_to_txt()\n if 'percentage' in args_dict:\n inst.save_percentages_to_txt()\n if 'annotated_txt' in args_dict:\n inst.save_annotated_text_to_txt()\n if 'annotated_xml' in args_dict:\n inst.save_annotated_text_to_xml()\n return\n #if we choose the url option\n if 'url' in args_dict:\n url = args_dict.pop('url')\n url = horizon_url(url)\n #save horizon pages into txt\n url.save_horizon_to_txt()\n #perform operations depending on the user input\n perform_operation()\n #if we choose the folder option\n elif 'folder' in args_dict:\n folder = args_dict.pop('folder')\n os.chdir(folder)\n #perform operations depending on the user input\n perform_operation()\n #if we choose the textfile option\n elif 'textfile' in args_dict:\n textfile = args_dict.pop('textfile')\n #initialise counter for folders\n url_nr = 1\n #for every line in the text_file\n for line in textfile:\n #build new directory and move into it\n os.mkdir('url_nr_'+str(url_nr))\n os.chdir('url_nr_'+str(url_nr))\n url = line.replace('\\n', '')\n url = horizon_url(url)\n #save horizon pages into txt\n url.save_horizon_to_txt()\n #perform operations depending on the user input\n perform_operation()\n #update counter for folders\n url_nr += 1\n os.chdir('..')\n elif 'parent_directory' in args_dict:\n parent_directory = args_dict.pop('parent_directory')\n #initialise list for good paths (i.e. the ones containing only txt \n #files)\n good_paths = []\n #all paths\n all_paths = ([x[0] for x in os.walk(parent_directory)])\n for i in all_paths:\n #content of the paths\n content = os.listdir(i)\n #if there is a directory in the folder, then pass. Otherwise, \n #add to list\n for j in content:\n if not j.endswith('txt'):\n pass\n else:\n good_paths.append(i)\n break\n #for every good path\n for i in good_paths:\n #initialise a parameter containing the number of subdirectories \n #of the path\n amount_subdirectories = 1 + i.count('/')\n #go to the directory\n os.chdir(i)\n #perform operations depending on the user input\n perform_operation()\n #come back to the parent directory\n while amount_subdirectories > 0:\n os.chdir('..')\n amount_subdirectories -= 1\n #if no one among url, folder, textfile or parent_directory is provided, \n #return an error and exit\n else: \n raise TypeError('Either -u, -f, -t, or -p must be specified')\n exit(1)",
"def dump(self):\n self.hasher.update_time_dicts() # Makes the time measurements available\n\n print(\" Creating a results folder in {} and storing all results there.\".format(self.config.output_dir))\n if not os.path.isdir(self.config.output_dir):\n os.mkdir(self.config.output_dir)\n\n print(\" Dumping profile ...\")\n profile_file_name = \"{}_{}_profile\".format(self.name, self.config.mode)\n with open(os.path.join(self.config.output_dir, profile_file_name), \"a\") as file:\n profile = {\"config\": self.config.dump(),\n \"hash\": self.hasher.hash_time_dict,\n \"find\": self.hasher.find_time_dict}\n\n json.dump(profile, file)\n\n print(\" Dumping matches ...\")\n for i, match in enumerate(self.__matched_offsets):\n if int(match[0] > match[1]):\n offset_a = match[1]\n offset_b = match[0]\n else:\n offset_a = match[0]\n offset_b = match[1]\n\n match_file_name = \"{}_{}_{}_{}\".format(self.name, self.config.mode, offset_a, offset_b)\n with open(os.path.join(self.config.output_dir, match_file_name), \"w\") as file:\n infos = \"Config:\\n: {}\".format(self.config)\n text_a = \"\"\n text_b = \"\"\n if self.config.dump_text:\n text_a = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_a))\n text_b = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_b))\n\n file.write(\"{}\\n\\n{}\\n\\n{}\\n\\n{}\".format(infos, text_a, \"#\"*25, text_b))\n\n if self.config.dump_graph:\n print(\" Creating graphs ...\")\n x1, x2 = list(), list()\n y1, y2 = list(), list()\n t_all = 0\n for element, t in self.hasher.hash_time_dict.items():\n t_all += t\n x1.append(element)\n y1.append(t_all)\n\n t_all = 0\n for element, t in self.hasher.find_time_dict.items():\n t_all += t\n x2.append(element)\n y2.append(t_all)\n\n self.__plot(os.path.join(self.config.output_dir, \"hash_time\"), x1, y1)\n self.__plot(os.path.join(self.config.output_dir, \"find_time\"), x2, y2)\n\n print(\"\\n\\n\")\n\n return",
"def main():\n outfile = 'result.txt'\n\n if os.path.exists(outfile):\n os.remove(outfile)\n\n for arg in sys.argv[1:]:\n get_info(arg, outfile)",
"def test_outputs(self, monkeypatch, script_runner):\n monkeypatch.setattr(\"builtins.input\", lambda _: \"n\")\n _ = script_runner.run(\n \"spectrafit\",\n \"spectrafit/test/test_data.txt\",\n \"-i\",\n \"spectrafit/test/test_input_2.json\",\n )\n assert len(list(Path(\".\").glob(\"*.json\"))) == 1\n assert len(list(Path(\".\").glob(\"*.csv\"))) == 3",
"def report(self):\n print()\n print(\"%-15s %-25s %s\" % (\"Class\", \"Name\", \"File\"))\n print(\"%-15s %-25s %s\" % (\"-----\", \"----\", \"----\"))\n for m in sorted(self.flatten(), key=lambda n: n.identifier):\n print(\"%-15s %-25s %s\" % (type(m).__name__, m.identifier, m.filename or \"\"))",
"def run(self):\n # FILE INPUT\n if self.text_type == \"file\":\n self.process_files()\n\n # STRING INPUT\n else:\n self.process_strings()\n\n if self.json:\n self.save_json()\n\n if self.errors:\n print(\"\\nThe following file(s) could not be opened:\")\n for error in self.errors:\n print(f\"\\t{error}\")",
"def parse(self, **kwargs):\n output_filename = self.node.get_option('output_filename')\n jobname = self.node.get_option('jobname')\n if jobname is not None:\n output_filename = \"log-\" + jobname + \".yaml\"\n # Check that folder content is as expected\n files_retrieved = self.retrieved.list_object_names()\n files_expected = [output_filename]\n # Note: set(A) <= set(B) checks whether A is a subset of B\n if not set(files_expected) <= set(files_retrieved):\n self.logger.error(\"Found files '{}', expected to find '{}'\".format(\n files_retrieved, files_expected))\n return self.exit_codes.ERROR_MISSING_OUTPUT_FILES\n\n # add output file\n self.logger.info(\"Parsing '{}'\".format(output_filename))\n# print(self.retrieved._repository._get_base_folder().get_abs_path(output_filename))\n output = BigDFTLogfile(self.retrieved._repository._get_base_folder().\n get_abs_path(output_filename))\n try:\n output.store()\n except ValidationError:\n self.logger.info(\"Impossible to store LogFile - ignoring '{}'\".\n format(output_filename))\n\n# with self.retrieved.open(output_filename, 'rb') as handle:\n# output_node = SinglefileData(file=handle)\n# output_dict_aiida=orm.Dict(dict=output_dict)\n# output_dict_aiida.store()\n# output_log_aiida=BigDFTLogfile(output)\n self.out('bigdft_logfile', output)\n\n return ExitCode(0)",
"def main():\n inputs = []\n files = set()\n\n args = parseArguments()\n\n # Configure the stdout logger\n logging.basicConfig(format=\"%(filename)s: %(levelname)s: %(message)s\",\n level=logging.DEBUG)\n\n try:\n # Create a list of input format objects\n for gcsv in args.gcsv.split():\n inputs.append(GoogleCSV(gcsv))\n for plain in args.plain.split():\n inputs.append(Plain(plain))\n\n # Get the URLs\n urls = mergeURLS(inputs)\n\n # Get the files\n for dir in args.dirs.split():\n files = files.union(formatFiles(dir, args.utc, args.ext))\n\n # Search for matches\n redirects = fuzzySearch(urls, files, args.matches, args.cutoff)\n\n except Exception as e:\n logging.error(e)\n\n if args.output == \"csv\":\n out = CSV(redirects, args.subdomain)\n elif args.output == \"rack\":\n out = Rack(redirects, args.subdomain)\n else:\n out = OutputFormat(redirects, args.subdomain)\n\n print(out)",
"def explore(racine,file_out):\n if racine.data == \"programme\":\n if(type(racine.children)==list):\n for el in racine.children:\n explore(el,file_out)\n elif racine.data == \"txt\":\n if(file_out != None):\n file_out.write(racine.children[0])\n \n elif racine.data == \"dumbo_bloc\":\n if(type(racine.children)==list):\n for el in racine.children:\n explore(el,file_out)\n\n elif racine.data == \"expression_list\":\n if(type(racine.children)==list):\n for el in racine.children:\n explore(el,file_out)\n\n elif racine.data == \"expression\":\n if(racine.children[0].data == \"print\"):\n if file_out is not None:\n file_out.write(str(getVar(racine.children[0].children[0])) )\n \n elif(racine.children[0].data == \"if\"):\n executeIf(racine.children[0],file_out)\n elif(racine.children[0].data == \"for\"):\n executeFor(racine.children[0],file_out)\n elif(racine.children[0].data == \"variable\"):\n dic[racine.children[0].children[0]] = getVar(racine.children[1])",
"def parse(self, **kwargs):\n if 'qcschema' in self.node.inputs:\n input_method = 'qcschema'\n if 'psiapi' in self.node.inputs:\n input_method = 'psiapi'\n output_filename = PSI4_FILENAMES[input_method]['output']\n\n # Check that folder content is as expected\n files_retrieved = self.retrieved.list_object_names()\n files_expected = [output_filename]\n # Note: set(A) <= set(B) checks whether A is a subset of B\n if not set(files_expected) <= set(files_retrieved):\n self.logger.error(\"Found files '{}', expected to find '{}'\".format(\n files_retrieved, files_expected))\n return self.exit_codes.ERROR_MISSING_OUTPUT_FILES\n\n # add outputs\n self.logger.info(\"Parsing '{}'\".format(output_filename))\n with self.retrieved.open(output_filename, 'rb') as handle:\n\n if input_method == 'psiapi':\n log_node = SinglefileData(file=handle,\n filename=output_filename)\n\n elif input_method == 'qcschema':\n output_dict = json.loads(handle.read())\n if not output_dict['success']:\n return self.exit_codes.ERROR_CALCULATION_FAILED\n\n # remove stdout (don't want to store unparsed files in the database)\n log_node = SinglefileData(\n # note: in python3.9 with AiiDA 2.0 this can be simplified to\n # file=io.StrinIO(''.join(output_dict['stdout'])),\n file=io.BytesIO(\n bytes(''.join(output_dict['stdout']),\n encoding='utf8')),\n filename=PSI4_FILENAMES['qcschema']['output'])\n output_dict.pop('stdout')\n\n self.out('qcschema', orm.Dict(dict=output_dict))\n\n self.out('stdout', log_node)\n\n return ExitCode(0)",
"def test_present_results_displays_results(self):\n # to test this we don't actually need to write to the database,\n # we just need a list of ordered_dicts in menu.records\n test_records = [\n OrderedDict([\n ('name', 'Test Employee 1'),\n ('date', datetime.date(2018, 5, 1)),\n ('task_name', 'Test Task 1'),\n ('duration', 1),\n ('notes', 'This is a note for the first test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 2'),\n ('date', datetime.date(2018, 5, 2)),\n ('task_name', 'Test Task 2'),\n ('duration', 2),\n ('notes', 'This is a note for the second test task')\n ]),\n ]\n self.menu.records = [test_records[0]]\n f_username = test_records[0]['name']\n f_date = test_records[0]['date'].strftime(\"%Y-%m-%d\")\n f_time_taken = str(test_records[0]['duration'])\n f_task_name = test_records[0]['task_name']\n f_notes = test_records[0]['notes']\n short_form = \"{}: {} ({}m): {} | {}\".format(\n f_username,\n f_date,\n f_time_taken,\n f_task_name,\n f_notes\n )\n expected_output = (\"\\nSearch Results\\n\" +\n \"1) {}\\n\".format(short_form) +\n \"\\n\" +\n \"Available actions:\\n\" +\n \"v) View detail\\n\" +\n \"e) Edit\\n\" +\n \"d) Delete\\n\" +\n \"m) go back to Main menu\\n\" +\n \"q) quit\\n\")\n\n '''The process for capturing `print()` statements and redirecting to\n an accumulating object for later processing has the following steps:\n 1. import io and sys\n 2. in the test function, create a StringIO object\n (this is a buffer object that will be the destination for the\n redirected stdout)\n ```\n captured_output = io.StringIO()\n ```\n 3. point stdout at the capture object\n ```\n sys.stdout = captured_output\n ```\n 4. Run code as normal, any print() statement will go to\n the StringIO object instead of standard out\n 5. Revert stdout (will not affect the contents of the StringIO buffer)\n ```\n sys.stdout = sys.__stdout__\n ```\n 6. Run the rest of the code. The contents of the StringIO buffer can\n be accessed as follows:\n ```\n captured_output.getvalue()\n ```\n '''\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n example_input = 'q'\n with patch('builtins.input', side_effect=example_input):\n self.menu.present_results()\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())",
"def __init__(self,query_results,output='term',output_style=\"single\",output_file=None,output_stream='STDIO',color=True): # style single double rst \n if None==query_results:\n return\n self.output=None\n mode=output.lower()\n if 'bash'==mode:\n self.output=self.format_bash(query_results)\n \n elif 'term'==mode:\n self.output=self.format_term(query_results,output_style,output_stream=output_stream,color=color)\n \n elif 'raw'==mode:\n self.output=self.format_raw(query_results,output_stream)\n \n elif 'yaml'==mode:\n self.output=self.format_yaml(query_results)\n \n elif 'json'==mode:\n self.output=self.format_json(query_results)\n \n elif 'xml'==mode:\n self.output=self.format_xml(query_results)\n elif 'time'==mode:\n self.output =\"User Time:Start:{0}, End:{1}, Elapsed:{2}\".format(query_results.start_time,query_results.end_time,query_results.time)\n self.output+=\"Wall Time:Start:{0}, End:{1}, Elapsed:{2}\".format(query_results.wall_start,query_results.wall_end,query_results.wall_time) #default\n else: \n self.output=self.format_term(query_results)",
"def search(self):\n apk_files = self.apk.get_files_types()\n search_results = []\n for file_path, file_type in apk_files.iteritems():\n file_ext = os.path.splitext(os.path.basename(file_path))[1]\n\n #if file type filter on, and this file is not that type, then skip\n if self.file_types and not any(interested_type in file_type.lower() or interested_type in file_ext for interested_type in self.file_types):\n continue\n\n search_result = None\n file_data = self.apk.get_file(file_path)\n\n if self.search_strings:\n for pattern in self.patterns:\n match = pattern.search(file_data)\n if match:\n search_result = {'file_path': file_path,\n 'file_type': file_type,\n 'search_string': pattern.pattern}\n search_results.append(search_result)\n else:\n search_result = {'file_path': file_path,\n 'file_type': file_type,\n 'search_string': None}\n search_results.append(search_result)\n\n #write individual files\n if search_result and self.save_matched_files_dir:\n #save original structure to avoid duplicate filename collisions\n save_file_path = os.path.join(self.save_matched_files_dir, file_path)\n if not os.path.exists(os.path.dirname(save_file_path)):\n os.makedirs(os.path.dirname(save_file_path))\n\n with open(save_file_path,'wb') as f:\n f.write(file_data)\n\n if 'Android binary XML' in file_type:\n with open(save_file_path,'r+') as axml_f:\n decoded_axml = AXMLPrinter(axml_f.read()).buff\n axml_f.seek(0)\n axml_f.write(decoded_axml)\n axml_f.truncate()\n\n return search_results",
"def process(text, output_dir, file_name, json_output):\n\t\n\t# Process HTML\n\tprocessed_text_html = process_html(text)\n\t# Write processed HTML output \n\t#pre_proc.create_text_file(output_dir + \"/html_\" + file_name + \"_pre.html\", processed_text_html)\n\n\t# Convert HMTL to MD\n\ttext_md = pre_proc.extract_text_md(processed_text_html)\n\n\t# Process MD\n\tprocessed_text_md = process_md(text_md)\n\t\n\tif(json_output):\n\t\t# Convert MD to JSON\n\t\tprocessed_json = pre_proc.convert_md_to_json(processed_text_md, file_name)\n\t\t# Write processed JSON output \n\t\tpre_proc.create_binary_file(output_dir + \"/\" + file_name + \".json\", processed_json)\n\telse:\n\t\t# Write processed MD output \n\t\tpre_proc.create_text_file(output_dir + \"/\" + file_name + \".md\", processed_text_md)",
"def show_result():\n return scan_result(request.args.get('filename'))",
"def main():\n\n # Command Line Interface\n parse = command_line()\n args = parse.parse_args()\n if not os.path.isdir(args.directory):\n raise IOError\n\n # Abstract File Tree\n filetree = tree_walk(args.directory, args.replace, args.depth)\n jsontree = json.dumps(\n filetree,\n indent=4,\n sort_keys=True,\n separators=(', ', ': '),\n )\n\n # Pipe vs Redirection\n if sys.stdout.isatty():\n try: jsontree = highlight(\n jsontree,\n JsonLexer(),\n Terminal256Formatter(style='autumn'))\n except:\n pass\n\n print(jsontree)",
"def cat(config, input):\n for file in input:\n while True:\n output = file.read()\n if not output:\n break\n m = SearchMatches(file, output, config.regex, config.color, config.underline)\n m.print_match_lines()",
"def process_match(\n self,\n entry, \n test_index, \n test, \n defaults\n ):\n format_string = ''\n if \"test_exception\" in entry and \\\n entry[\"test_exception\"] is not None:\n out_string = entry[\"test_exception\"]\n else:\n if \"format\" in test:\n format_string = test[\"format\"]\n elif \"format\" in defaults:\n format_string = defaults[\"format\"]\n else:\n return \n out_string = Output.populate_format(entry, format_string)\n self.results.append(out_string)",
"def main(index, output_file, **kwargs):\n\n output_jsonl = None\n output_text = None\n if 'json' in kwargs['output_format']:\n fname = output_file if len(kwargs['output_format']) == 1 else kwargs['output_format'] + '.jsonl'\n output_jsonl = open(fname, 'w')\n if 'text' in kwargs['output_format']:\n fname = output_file if len(kwargs['output_format']) == 1 else kwargs['output_format'] + '.txt'\n output_text = open(fname, 'w')\n\n if kwargs.get('query') is not None:\n query = json.load(kwargs.get('query'))\n else:\n query = {\n \"sort\": [\"warc_id\"],\n \"size\": 200,\n \"query\": {\n \"bool\": {\n \"filter\": {\n \"bool\": {\n \"must_not\": [\n {\n \"query_string\": {\n \"analyze_wildcard\": True,\n \"default_field\": \"*\",\n \"query\": \"\"\"group:(*.patches OR *.commits* OR\n *.dist-commits* OR *.version-control* OR *.git* OR *.cvs* OR *.svn*\n OR *.trunk* OR *.scm* OR *.pkg*) OR (group:(*.bugs* OR *.issues*\n OR *.bugzilla* OR *.codereview*) OR \n headers.subject.keyword:(*jira* OR *bugzilla*) OR\n headers.from_email.keyword:(*bugs* OR *bugzilla* OR *jira* OR *jboss*))\"\"\"\n }\n }\n ],\n \"must\": {\"term\": {\"lang\": \"en\"}},\n \"minimum_should_match\": 1,\n \"should\": [\n {\"wildcard\": {\"group\": \"gmane.culture.*\"}},\n {\"wildcard\": {\"group\": \"gmane.politics.*\"}},\n {\"wildcard\": {\"group\": \"gmane.science.*\"}},\n {\"wildcard\": {\"group\": \"gmane.education.*\"}},\n {\"wildcard\": {\"group\": \"gmane.music.*\"}},\n {\"wildcard\": {\"group\": \"gmane.games.*\"}},\n {\"wildcard\": {\"group\": \"gmane.recreation.*\"}}\n ]\n }\n }\n }\n }\n }\n\n logger.info('Retrieving initial batch')\n es = util.get_es_client()\n results = util.es_retry(es.search, index=index, scroll='10m', size=kwargs['scroll_size'], body=query)\n\n skip = kwargs['skip']\n if skip > 0:\n logger.info('Skipping ahead {} messages'.format(skip))\n\n sampled_groups = {}\n num_samples = 0\n num_skipped = 0\n\n try:\n with tqdm(desc='Calculating progress', unit=' messages') as progress_bar:\n while num_samples < kwargs['total_mails'] and len(results['hits']['hits']) > 0:\n for hit in results['hits']['hits']:\n if skip > 0 and num_skipped < skip:\n progress_bar.set_description('Skipping messages')\n progress_bar.total = skip\n num_skipped += 1\n progress_bar.update()\n continue\n elif (skip == 0 or num_skipped >= skip) and num_samples == 0:\n progress_bar.set_description('Sampling messages')\n progress_bar.total = kwargs['total_mails']\n progress_bar.n = 0\n progress_bar.last_print_n = 0\n progress_bar.update(0)\n\n src = hit['_source']\n text_plain = src['text_plain']\n\n prev_samples = sampled_groups.get(src['group'], 0)\n if kwargs['group_limit'] and prev_samples > kwargs['group_limit']:\n continue\n sampled_groups[src['group']] = prev_samples + 1\n\n num_samples += 1\n progress_bar.update()\n\n if output_jsonl:\n json.dump({'text': text_plain,\n 'meta': {k: src[k] for k in src.keys() if k not in ['text_plain', 'text_html']},\n 'labels': []}, output_jsonl)\n output_jsonl.write('\\n')\n\n if output_text:\n output_text.write(util.normalize_message_text(text_plain))\n output_text.write('\\n')\n\n if num_samples >= kwargs['total_mails']:\n break\n\n results = util.es_retry(es.scroll, scroll_id=results['_scroll_id'], scroll='10m')\n finally:\n es.clear_scroll(scroll_id=results['_scroll_id'])\n\n if output_jsonl:\n output_jsonl.close()\n if output_text:\n output_text.close()"
]
| [
"0.6003957",
"0.5898108",
"0.56751025",
"0.56545293",
"0.5604814",
"0.5568598",
"0.55671835",
"0.5561098",
"0.5531501",
"0.55153",
"0.5513097",
"0.54923934",
"0.5433857",
"0.5425692",
"0.54195386",
"0.539011",
"0.53654116",
"0.53612506",
"0.5351342",
"0.53480667",
"0.5315203",
"0.53089166",
"0.53061616",
"0.53044957",
"0.5292819",
"0.52843124",
"0.5280159",
"0.5265407",
"0.526296",
"0.5259977"
]
| 0.68714005 | 0 |
Verify that volumes pagination works correctly forward and backward. | def test_volumes_pagination(self, volumes_steps, create_volumes,
update_settings):
volume_names = list(generate_ids('volume', count=3))
create_volumes(volume_names)
update_settings(items_per_page=1)
tab_volumes = volumes_steps.tab_volumes()
tab_volumes.table_volumes.row(
name=volume_names[2]).wait_for_presence(30)
assert tab_volumes.table_volumes.link_next.is_present
assert not tab_volumes.table_volumes.link_prev.is_present
tab_volumes.table_volumes.link_next.click()
tab_volumes.table_volumes.row(
name=volume_names[1]).wait_for_presence(30)
assert tab_volumes.table_volumes.link_next.is_present
assert tab_volumes.table_volumes.link_prev.is_present
tab_volumes.table_volumes.link_next.click()
tab_volumes.table_volumes.row(
name=volume_names[0]).wait_for_presence(30)
assert not tab_volumes.table_volumes.link_next.is_present
assert tab_volumes.table_volumes.link_prev.is_present
tab_volumes.table_volumes.link_prev.click()
tab_volumes.table_volumes.row(
name=volume_names[1]).wait_for_presence(30)
assert tab_volumes.table_volumes.link_next.is_present
assert tab_volumes.table_volumes.link_prev.is_present
tab_volumes.table_volumes.link_prev.click()
tab_volumes.table_volumes.row(
name=volume_names[2]).wait_for_presence(30)
assert tab_volumes.table_volumes.link_next.is_present
assert not tab_volumes.table_volumes.link_prev.is_present | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pagination(self):\n self.check_pagination()",
"def test03_vms_page_table_paging_buttons(self):\n self.lg('%s STARTED' % self._testID)\n self.lg('try paging from start/previous/next/last and verify it should succeed')\n self.assertTrue(self.Tables.check_next_previous_buttons('machines'))\n self.lg('%s ENDED' % self._testID)",
"def test_volumes_get(self):\n pass",
"def test_vlv_with_page_size(self):\n search_dn = \"ou=nerdherd,%s\" % self.basedn\n self.assertRaises(bonsai.UnwillingToPerform,\n lambda: self.conn.search(search_dn, 1, page_size=3,\n sort_order=[\"-uidNumber\"],\n attrvalue=1, before_count=1,\n after_count=2,\n est_list_count=6))",
"def test02_vm_page_paging_table(self):\n self.lg('%s STARTED' % self._testID)\n self.lg('try paging from the available page numbers and verify it should succeed')\n self.assertTrue(self.Tables.check_show_list('machines'))\n self.lg('%s ENDED' % self._testID)",
"def test_get_photos_paging(self):\n pass",
"def test_get_html_paginated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'video_upload_pagination')",
"def test_aws_service_api_volumes_get(self):\n pass",
"def test_paginated_photos(self):\n\n own_album = AlbumFactory.create(user=self.user)\n photos = PhotoFactory.create_batch(3, album=own_album)\n PhotoFactory.create_batch(2, album=own_album, trash=True)\n trash_album = AlbumFactory.create(user=self.user, trash=True)\n PhotoFactory.create(album=trash_album)\n album_group = AlbumGroupFactory.create(users=[self.user])\n photos += PhotoFactory.create_batch(4, album=album_group.album)\n random_album = AlbumFactory.create()\n PhotoFactory.create_batch(5, album=random_album)\n\n url = reverse(\"api:my/photos-list\")\n\n # check as anonymous user\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n response.data, {\"detail\": \"Authentication credentials were not provided.\"}\n )\n\n # authenticated\n self.client.login(username=self.user.username, password=\"password\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # ensure pagination is active\n self.assertEqual(response.data[\"count\"], len(photos))\n\n photos.reverse()\n results = response.data[\"results\"]\n for photo, result in zip(photos, results):\n self.assertEqual(result[\"id\"], photo.id)\n self.assertEqual(result[\"user\"], photo.user_id)\n image = result[\"image\"]\n self.assertIn(\"large\", image)\n self.assertIn(\"thumb\", image)\n\n # test filter\n response = self.client.get(url, {\"album\": own_album.pk})\n self.assertEqual(response.data[\"count\"], 3)\n results = response.data[\"results\"]\n\n for result, photo in zip(results, photos[-3:]):\n self.assertEqual(result[\"id\"], photo.id)",
"def test_aws_service_api_volume_get(self):\n pass",
"def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)",
"def test_tags_browse_click_page_links_check_items_displayed(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n\n # change the display limit to 5\n new_display_limit = '5'\n po.form.footer.display_limit(new_display_limit)\n\n # get the updated display limit\n display_limit = int(po.form.footer.display_limit())\n\n assert display_limit == int(new_display_limit), \\\n \"updated display limit does not match the display\" \\\n + \" limit set by user: updated display limit =\" \\\n + \" '%s', user set display limit = '%s'\" \\\n % (display_limit,new_display_limit)\n\n # get the updated page number links\n page_numbers = po.get_link_page_numbers()\n\n page_url = po.current_url()\n\n for p in page_numbers:\n # click the page number link\n po.goto_page_number(p)\n\n po2 = self.catalog.load_pageobject('TagsBrowsePage')\n\n # get the number of items that should be displayed\n # according to the pagination counts\n (start,end,total) = po2.get_pagination_counts()\n num_pag = (end-start+1)\n\n # get the number of items that are actually displayed\n num_rows = po2.form.search_results.num_rows()\n\n # compare that is should be displayed to what is displayed\n assert num_pag == num_rows, \\\n \"after clicking page link #%s on %s,\" % (p,page_url) \\\n + \" the number of items displayed does not match the\" \\\n + \" number of items listed in the pagination counts:\" \\\n + \" displayed = %s, start = %s,\" % (num_rows,start) \\\n + \" end = %s, end-start+1 (what should be displayed) = %s\" \\\n % (end,num_pag)\n\n # return back to our original page\n self.browser._browser.back()",
"def test_videos_pagination_constrain_collection(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n page_size = 8\n VideoSetPagination.page_size = page_size\n client, user = logged_in_apiclient\n collections = CollectionFactory.create_batch(3, owner=user)\n videos_by_collection_key = {\n collection.hexkey: VideoFactory.create_batch(20, collection=collection)\n for collection in collections\n }\n url = reverse(\"models-api:video-list\")\n target_collection = collections[1]\n result = client.get(url, {\"collection\": target_collection.hexkey})\n expected_videos = videos_by_collection_key[target_collection.hexkey]\n assert len(result.data[\"results\"]) == min(page_size, len(expected_videos))\n for i in range(1, 3):\n paged_url = url + \"?page={}\".format(i)\n result = client.get(paged_url)\n assert len(result.data[\"results\"]) == min(\n page_size, max(0, len(expected_videos) - page_size * (i - 1))\n )",
"def test_book_search_pagination(self):\n c = Client()\n # TODO implement a test\n pass",
"def test_pagination(self):\r\n page = 1\r\n per_page = 5\r\n total_count = 10\r\n p = pybossa.util.Pagination(page, per_page, total_count)\r\n assert p.page == page, p.page\r\n assert p.per_page == per_page, p.per_page\r\n assert p.total_count == total_count, p.total_count\r\n\r\n err_msg = \"It should return two pages\"\r\n assert p.pages == 2, err_msg\r\n p.total_count = 7\r\n assert p.pages == 2, err_msg\r\n p.total_count = 10\r\n\r\n err_msg = \"It should return False\"\r\n assert p.has_prev is False, err_msg\r\n err_msg = \"It should return True\"\r\n assert p.has_next is True, err_msg\r\n p.page = 2\r\n assert p.has_prev is True, err_msg\r\n err_msg = \"It should return False\"\r\n assert p.has_next is False, err_msg\r\n\r\n for i in p.iter_pages():\r\n err_msg = \"It should return the page: %s\" % page\r\n assert i == page, err_msg\r\n page += 1",
"async def test_stable_version_pagination(aresponses):\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/page1\", False), status=200, headers=HEADERS\n ),\n )\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags/page2\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/page2\", False), status=200, headers=HEADERS\n ),\n )\n async with aiohttp.ClientSession() as session:\n haversion = HaVersion(\n session=session,\n source=HaVersionSource.CONTAINER,\n )\n await haversion.get_version()\n assert haversion.version == STABLE_VERSION",
"def test_pagination(self):\n for num in range(60):\n self.add_mp3(artist='Artist', title='Title %02d' % (num+1),\n album='Album %02d' % (num+1), filename='song%d.mp3' % (num+1))\n self.run_add()\n self.assertEqual(Album.objects.count(), 60)\n\n self.assertEqual(Artist.objects.count(), 2)\n artist = Artist.objects.get(name='Artist')\n\n albums = {}\n for num in range(60):\n albums[num] = Album.objects.get(name='Album %02d' % (num+1))\n\n songs = {}\n for num in range(60):\n songs[num] = Song.objects.get(title='Title %02d' % (num+1))\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '50 of 60 albums')\n self.assertContains(response, '25 of 60 songs')\n self.assertContains(response, '\"?album-page=2\"')\n self.assertContains(response, '\"?song-page=2\"')\n self.assertEqual(len(response.context['albums'].data), 60)\n self.assertEqual(len(response.context['songs'].data), 60)\n for num in range(50):\n self.assertContains(response, '%s<' % (albums[num]))\n self.assertContains(response, reverse('exordium:album', args=(albums[num].pk,)))\n for num in range(50, 60):\n self.assertNotContains(response, '%s<' % (albums[num]))\n self.assertNotContains(response, reverse('exordium:album', args=(albums[num].pk,)))\n for num in range(25):\n self.assertContains(response, '%s<' % (songs[num]))\n for num in range(25, 60):\n self.assertNotContains(response, '%s<' % (songs[num]))\n\n # test page 2/3\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-page': 2, 'song-page': 3})\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '10 of 60 albums')\n self.assertContains(response, '10 of 60 songs')\n self.assertContains(response, 'album-page=1')\n self.assertContains(response, 'song-page=2')\n self.assertEqual(len(response.context['albums'].data), 60)\n self.assertEqual(len(response.context['songs'].data), 60)\n for num in range(50):\n self.assertNotContains(response, '%s<' % (albums[num]))\n self.assertNotContains(response, reverse('exordium:album', args=(albums[num].pk,)))\n for num in range(50, 60):\n self.assertContains(response, '%s<' % (albums[num]))\n self.assertContains(response, reverse('exordium:album', args=(albums[num].pk,)))\n for num in range(50):\n self.assertNotContains(response, '%s<' % (songs[num]))\n for num in range(50, 60):\n self.assertContains(response, '%s<' % (songs[num]))",
"def test_videos_pagination(mocker, logged_in_apiclient):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n page_size = 8\n VideoSetPagination.page_size = page_size\n client, user = logged_in_apiclient\n collection = CollectionFactory(owner=user)\n videos = VideoFactory.create_batch(20, collection=collection)\n url = reverse(\"models-api:video-list\")\n result = client.get(url)\n assert len(result.data[\"results\"]) == min(page_size, len(videos))\n for i in range(1, 3):\n paged_url = url + \"?page={}\".format(i)\n result = client.get(paged_url)\n assert len(result.data[\"results\"]) == min(\n page_size, max(0, len(videos) - page_size * (i - 1))\n )",
"def test_get_versions(self):\n versions = get_versions(self.session_mock, S3_BUCKET, S3_OBJECT_WITH_VERSIONS, S3_MAX_KEYS)\n self.list_object_versions_mock.paginate.assert_called_once_with(\n Bucket=S3_BUCKET, Prefix=S3_OBJECT_WITH_VERSIONS, MaxKeys=S3_MAX_KEYS\n )\n self.assertEqual(4, len(versions))",
"def test_tags_view_click_page_links_check_items_displayed(self,tag_with_items):\n\n self.tag_name = tag_with_items\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n po.search_for_content([self.tag_name])\n\n po = self.catalog.load_pageobject('TagsViewPage')\n\n # change the display limit to 5\n new_display_limit = '5'\n po.form.footer.display_limit(new_display_limit)\n\n # get the updated display limit\n display_limit = int(po.form.footer.display_limit())\n\n assert display_limit == int(new_display_limit), \\\n \"updated display limit does not match the display limit\" \\\n + \" set by user: updated display limit =\" \\\n + \" '%s', user set display limit = '%s'\" \\\n % (display_limit,new_display_limit)\n\n # get the updated page number links\n page_numbers = po.get_link_page_numbers()\n\n page_url = po.current_url()\n\n for p in page_numbers:\n # click the page number link\n po.goto_page_number(p)\n\n po2 = self.catalog.load_pageobject('TagsViewPage')\n\n # get the number of items that should be displayed\n # according to the pagination counts\n (start,end,total) = po2.get_pagination_counts()\n num_pag = (end-start+1)\n\n # get the number of items that are actually displayed\n num_rows = po2.form.search_results.num_rows()\n\n # compare that is should be displayed to what is displayed\n assert num_pag == num_rows, \\\n \"after clicking page link #%s on %s,\" \\\n % (p,page_url) \\\n + \" the number of items displayed does not match the\" \\\n + \" number of items listed in the pagination counts:\" \\\n + \" displayed = %s, start = %s, end = %s,\" \\\n % (num_rows,start,end) \\\n + \" end-start+1 (what should be displayed) = %s\" \\\n % (num_pag)\n\n # return back to our original page\n self.browser._browser.back()",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass",
"def can_paginate(operation_name=None):\n pass"
]
| [
"0.69831944",
"0.62766",
"0.6245892",
"0.62054735",
"0.61570704",
"0.61375374",
"0.60276866",
"0.60012406",
"0.5986151",
"0.59639984",
"0.5956867",
"0.5899587",
"0.58527124",
"0.58507735",
"0.5844528",
"0.5832102",
"0.5763427",
"0.5760909",
"0.57395583",
"0.5714855",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858",
"0.5710858"
]
| 0.7495992 | 0 |
Verify that user can view volume info. | def test_view_volume(self, volume, volumes_steps):
volumes_steps.view_volume(volume.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_volume_access_right_exists( user_email, volume_name, caps, allowed_gateways=[msconfig.GATEWAY_TYPE_UG] ):\n client = connect_syndicate()\n return syndicate_provisioning.ensure_volume_access_right_exists( client, user_email, volume_name, caps, allowed_gateways )",
"def can_view(self, user):\r\n return True",
"def can_be_viewed_by(self,user):\n return True",
"def show_volume(self, volume, check=True):\n cmd = 'cinder show ' + volume.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_SHOW_TIMEOUT, check=check)\n\n volume_table = output_parser.table(stdout)\n show_result = {key: value for key, value in volume_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(volume.id))\n if volume.name:\n assert_that(show_result['name'], is_(volume.name))\n if volume.description:\n assert_that(show_result['description'],\n is_(volume.description))",
"def is_virtual_vol_in_use(self, virtualvol):\n try:\n get_map = self.maps.get_map(virtualvol)\n except (utils.ApiException, ValueError, TypeError) as err:\n msg = \"Could not get the map view of {0} due to \"\n err_msg = msg.format(virtualvol) + \"error {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)\n\n vview_list = utils.serialize_content(get_map)\n # Collect the storage view if it has virtual volume\n if len(vview_list['parents']) > 0:\n return True\n return False",
"def get_viewable(self, user):\n return True",
"def is_viewable(miscobj):\n return misctype_byname(miscobj.filetype).viewable",
"def test_aws_service_api_volume_get(self):\n pass",
"def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")",
"def volume_in_use(self):\n return self._volume_status_check(STATUS_IN_USE, False, 0, 0)",
"def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")",
"def verify_allow_access_screen(self, raise_e=True):\n return self.driver.wait_for_object(\"dropbox_title\", timeout=10, raise_e=raise_e) and \\\n self.driver.wait_for_object(\"allow_access_btn\", timeout=30, raise_e=raise_e)",
"def can_show(self):\n return self.can_show",
"def canSeeProvider(self):\n provider = self.getProvider()\n if not provider:\n return False\n user = getSecurityManager().getUser()\n return user.has_permission('View', provider)",
"def volume_exists(vil, vol_name):\n\n exists = False\n try:\n if not vil:\n vil, err = get_basic_volume_info_all()\n if err:\n raise Exception(err)\n if vil:\n for v in vil:\n if v[\"name\"] == vol_name:\n exists = True\n except Exception, e:\n return False, 'Error checking for volume existance : %s' % str(e)\n else:\n return exists, None",
"def test_volumes_get(self):\n pass",
"def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')",
"def test_show_container_privilege(self):\n pass",
"def validate(self):\n\n return self.volume",
"def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)",
"def storage_can_read(self):\n return True",
"def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')",
"def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')",
"def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True",
"def can_retrieve(self, user):\n return user.has_perm('agenda.can_see')",
"def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )",
"def view(self, user, action, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n # TODO check groups in request maybe ? dunno\n if user.is_advisor:\n return True\n\n return self.admin_permission(user, action, *args)",
"def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)",
"def test_func(self):\n member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self",
"def check_volume(vol, srf_names, verbose=True):\n if verbose:\n print(\" checking volume for validity\")\n print(\" ----------------------------\")\n # check if dict\n if not isinstance(vol, dict):\n if verbose:\n print(\" vol is not of type dict\")\n return False\n # check for keys in gli dict\n in_vol_keys = set(vol)\n if in_vol_keys == VOL_KEYS:\n if verbose:\n print(\" vol keys are valid\")\n else:\n if verbose:\n print(\" vol keys are not valid!\")\n print(\" needs: \" + \" \".join(VOL_KEYS))\n print(\" found: \" + \" \".join(map(str, in_vol_keys)))\n print(\" missing: \" + \" \".join(map(str, VOL_KEYS - in_vol_keys)))\n print(\" corrupted: \" + \" \".join(map(str, in_vol_keys - VOL_KEYS)))\n return False\n # check NAME\n if (\n isinstance(vol[\"NAME\"], STRTYPE)\n and not has_whitespaces(vol[\"NAME\"])\n and len(vol[\"NAME\"]) > 0\n ):\n if verbose:\n print(\" vol['NAME'] valid\")\n print(\" NAME: '\" + vol[\"NAME\"] + \"'\")\n else:\n if verbose:\n print(\" vol['NAME'] not valid\")\n return False\n # check SURFACES\n if (\n isinstance(vol[\"SURFACES\"], list)\n and len(vol[\"SURFACES\"]) > 0\n and all(srf in srf_names for srf in vol[\"SURFACES\"])\n ):\n if verbose:\n print(\" vol['SURFACES'] valid\")\n else:\n if verbose:\n print(\" vol['SURFACES'] not valid\")\n return False\n # check TYPE\n if vol[\"TYPE\"] is None or isinstance(vol[\"TYPE\"], int):\n if verbose:\n print(\" vol['TYPE'] valid\")\n else:\n if verbose:\n print(\" vol['TYPE'] not valid\")\n return False\n # check MAT_GROUP\n if vol[\"MAT_GROUP\"] is None or isinstance(vol[\"MAT_GROUP\"], int):\n if verbose:\n print(\" vol['MAT_GROUP'] valid\")\n else:\n if verbose:\n print(\" vol['MAT_GROUP'] not valid\")\n return False\n # check LAYER\n if vol[\"LAYER\"] is None or isinstance(vol[\"LAYER\"], int):\n if verbose:\n print(\" vol['LAYER'] valid\")\n else:\n if verbose:\n print(\" vol['LAYER'] not valid\")\n return False\n # finally\n if verbose:\n print(\" ------------\")\n print(\" vol is valid\")\n print(\"\")\n return True"
]
| [
"0.65072924",
"0.6313191",
"0.5997504",
"0.5938842",
"0.58930606",
"0.5835009",
"0.58183914",
"0.57365507",
"0.57155776",
"0.56943774",
"0.5685391",
"0.567308",
"0.56155986",
"0.56130064",
"0.560022",
"0.55933326",
"0.5576085",
"0.55385345",
"0.55349183",
"0.54941154",
"0.54654396",
"0.5449135",
"0.544869",
"0.5438874",
"0.54357475",
"0.5433937",
"0.54276407",
"0.53929764",
"0.5389417",
"0.53795326"
]
| 0.6700226 | 0 |
Verify that user can change volume type. | def test_change_volume_type(self, create_volume, volumes_steps):
        volume_name = next(generate_ids('volume'))
create_volume(volume_name, volume_type=None)
volumes_steps.change_volume_type(volume_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_retype_setup_fail_volume_is_available(self, mock_notify):\n elevated = context.get_admin_context()\n project_id = self.context.project_id\n\n db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})\n old_vol_type = db.volume_type_get_by_name(elevated, 'old')\n db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})\n new_vol_type = db.volume_type_get_by_name(elevated, 'new')\n db.quota_create(elevated, project_id, 'volumes_new', 0)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host, status='available',\n volume_type_id=old_vol_type['id'])\n\n api = cinder.volume.api.API()\n self.assertRaises(exception.VolumeLimitExceeded, api.retype,\n self.context, volume, new_vol_type['id'])\n\n volume = db.volume_get(elevated, volume.id)\n # FIXME: restore when Bug #1803648 is figured out\n # mock_notify.assert_not_called()\n self.assertEqual('available', volume['status'])",
"def retype(self, ctxt, volume, new_type, diff, host):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(volume)\n vpool_name = new_type['extra_specs']['ViPR:VPOOL']\n\n try:\n task = self.volume_obj.update(\n self.configuration.vipr_tenant +\n \"/\" +\n self.configuration.vipr_project,\n volume_name,\n vpool_name)\n\n self.volume_obj.check_for_sync(task['task'][0], True)\n return True\n except vipr_utils.SOSError as e:\n if e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR:\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + volume_name + \": update failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : %s type update failed\") % volume_name)",
"def test_change_volume_status(self, volume, volumes_steps):\n volumes_steps.change_volume_status(volume.name, 'Error')\n volumes_steps.change_volume_status(volume.name, 'Available')",
"def set_volume(cls, newVolume: float) -> bool:\n raise NotImplementedError",
"def test_migrate_volume_driver_for_retype(self, mock_can_use):\n # Mock driver and rpc functions\n mock_driver = self.mock_object(self.volume.driver, 'migrate_volume',\n return_value=(True, {}))\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False,\n fake.VOLUME_TYPE2_ID, mock.sentinel.diff)\n\n mock_can_use.assert_called_once_with(mock.sentinel.diff)\n mock_driver.assert_called_once_with(self.context, volume, host_obj)\n # check volume properties\n volume = objects.Volume.get_by_id(context.get_admin_context(),\n volume.id)\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(fake.VOLUME_TYPE2_ID, volume.volume_type_id)",
"def test_aws_service_api_volume_types_get(self):\n pass",
"def user_can_edit_setting_type(user, model):\n return user.has_perm(\n \"{}.change_{}\".format(model._meta.app_label, model._meta.model_name)\n )",
"def retype(self, ctxt, volume, new_type, diff, host):\n LOG.debug(\"Enter retype: id=%(id)s, new_type=%(new_type)s, \"\n \"diff=%(diff)s, host=%(host)s.\", {'id': volume['id'],\n 'new_type': new_type,\n 'diff': diff,\n 'host': host})\n\n # Check what changes are needed\n migration, change_opts, lun_id = self.determine_changes_when_retype(\n volume, new_type, host)\n\n try:\n if migration:\n LOG.debug(\"Begin to migrate LUN(id: %(lun_id)s) with \"\n \"change %(change_opts)s.\",\n {\"lun_id\": lun_id, \"change_opts\": change_opts})\n if self._migrate_volume(volume, host, new_type):\n return True\n else:\n LOG.warning(_LW(\"Storage-assisted migration failed during \"\n \"retype.\"))\n return False\n else:\n # Modify lun to change policy\n self.modify_lun(lun_id, change_opts)\n return True\n except exception.VolumeBackendAPIException:\n LOG.exception(_LE('Retype volume error.'))\n return False",
"async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))\n await ctx.message.add_reaction('✅')",
"def test_volume_service(self):\n self.assertIsInstance(ChangeStateScript()._deployer._volume_service,\n VolumeService)",
"def volume_type(self):\n return 'UNKNOWN'",
"def volume_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"volume_type\")",
"def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")",
"def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")",
"def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")",
"def is_volume_muted(self) -> bool:\n return int(self._state.get(\"playback_mute\", 0)) == 1",
"def retype(self, context, volume, new_type, diff, host):\n LOG.debug('Retype volume request %(vol)s to be %(type)s '\n '(host: %(host)s), diff %(diff)s.',\n {'vol': volume['name'],\n 'type': new_type,\n 'host': host,\n 'diff': diff})\n\n options = dict(\n compression='compression',\n dedup='dedup',\n description='nms:description'\n )\n\n retyped = False\n migrated = False\n model_update = None\n\n src_backend = self.__class__.__name__\n dst_backend = host['capabilities']['location_info'].split(':')[0]\n if src_backend != dst_backend:\n LOG.warning('Cannot retype from %(src_backend)s to '\n '%(dst_backend)s.',\n {'src_backend': src_backend,\n 'dst_backend': dst_backend})\n return False\n\n hosts = (volume['host'], host['host'])\n old, new = hosts\n if old != new:\n migrated, provider_location = self.migrate_volume(\n context, volume, host)\n\n if not migrated:\n provider_location = volume['provider_location']\n nms = self.share2nms[provider_location]\n else:\n nms_url = host['capabilities']['nms_url']\n nms = self._get_nms_for_url(nms_url)\n model_update = provider_location\n provider_location = provider_location['provider_location']\n\n share = provider_location.split(':')[1].split('volumes/')[1]\n folder = '%(share)s/%(volume)s' % {\n 'share': share,\n 'volume': volume['name']\n }\n\n for opt in options:\n old, new = diff.get('extra_specs').get(opt, (False, False))\n if old != new:\n LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',\n {'opt': opt, 'old': old, 'new': new})\n try:\n nms.folder.set_child_prop(\n folder, options[opt], new)\n retyped = True\n except utils.NexentaException:\n LOG.error('Error trying to change %(opt)s'\n ' from %(old)s to %(new)s',\n {'opt': opt, 'old': old, 'new': new})\n return False, None\n return retyped or migrated, model_update",
"def is_volume_muted(self):\n return self._state.get(\"mute\", None)",
"def OnSetVolume(self):\r\n volume = self.volume_var.get()\r\n # vlc.MediaPlayer.audio_set_volume returns 0 if success, -1 otherwise\r\n if volume > 100:\r\n volume = 100\r\n if self.player.audio_set_volume(volume) == -1:\r\n self.errorDialog(\"Failed to set volume\")",
"def test_sound_volume(self):\n return self.send(\"test_sound_volume\")",
"def test_volumes_invalid_volume_type(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /foo:\n - a list makes no sense\n \"\"\"\n )\n\n self._invalid_config(\"must be string or dict\")",
"def test_migrate_volume_driver_for_retype_generic(self, mock_can_use,\n mock_generic):\n # Mock driver and rpc functions\n mock_driver = self.mock_object(self.volume.driver, 'migrate_volume',\n return_value=(False, None))\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False,\n fake.VOLUME_TYPE2_ID, mock.sentinel.diff)\n\n mock_can_use.assert_called_once_with(mock.sentinel.diff)\n mock_driver.assert_called_once_with(self.context, volume, host_obj)\n mock_generic.assert_called_once_with(self.context, volume, host_obj,\n fake.VOLUME_TYPE2_ID)",
"def is_volume_muted(self):\n return self._mute",
"async def _volume(self, ctx: commands.Context, *, volume: int):\n\n # if not ctx.voice_state.is_playing:\n # return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100.')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send(f\"Volume of the player set to {volume}%\\nThe volume will be applied in the next song.\")",
"def change_volume(self, sound_type, volume):\n if sound_type == \"background\":\n self.bg_volume = volume\n pygame.mixer.music.set_volume(self.bg_volume)\n elif sound_type == \"sound\":\n self.se_volume = volume\n for sound in self.sound_lib:\n self.sound_lib[sound].set_volume(volume)",
"async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.current.source.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))",
"def _volume_types(cls):\n try:\n return cls.volumes.behaviors.get_volume_types()\n except:\n raise DatasetGeneratorError(\n \"Unable to retrieve list of volume types during \"\n \"data-driven-test setup.\")",
"async def volume(self, ctx: commands.Context, volume: int):\n if not 0 <= volume <= 100:\n raise InvalidVolume()\n\n player = ctx.bot.lavalink.player_manager.get(ctx.guild.id)\n \n await player.set_volume(volume)\n await ctx.send(f'Volume alterado para {volume}%.')",
"def test_update_privilege_with_invalid_volume_size(self):\n\n # Create a tenant\n tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)\n\n # Create a privilege without volume size settings\n privilege = vim.vcs.storage.DatastoreAccessPrivilege()\n privilege.datastore = self.datastore\n privilege.allow_create = True\n\n # Add privilege to the tenant\n self.tenantMgr.AddPrivilege(tenant, privilege)\n\n # Update the privilege with invalid volume size\n with self.assertRaises(vmodl.fault.InvalidArgument):\n self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048, volume_total_size=1024)",
"def validate(self):\n\n return self.volume"
]
| [
"0.6560247",
"0.6318833",
"0.6275313",
"0.62473804",
"0.6031198",
"0.57690156",
"0.57422614",
"0.569965",
"0.5690666",
"0.56450164",
"0.5626222",
"0.5607844",
"0.5580567",
"0.5580567",
"0.5580567",
"0.5536625",
"0.5524085",
"0.5510654",
"0.5504379",
"0.5485735",
"0.5483211",
"0.5456037",
"0.5453793",
"0.54404366",
"0.54388833",
"0.5436898",
"0.5422349",
"0.53998643",
"0.5396559",
"0.53782547"
]
| 0.7038201 | 0 |
Verify that user can upload volume to image. | def test_upload_volume_to_image(self, volume, images_steps, volumes_steps):
image_name = next(generate_ids('image', length=20))
volumes_steps.upload_volume_to_image(volume.name, image_name)
images_steps.page_images().table_images.row(
name=image_name).wait_for_presence(30)
images_steps.delete_image(image_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_volume_access_right_exists( user_email, volume_name, caps, allowed_gateways=[msconfig.GATEWAY_TYPE_UG] ):\n client = connect_syndicate()\n return syndicate_provisioning.ensure_volume_access_right_exists( client, user_email, volume_name, caps, allowed_gateways )",
"def can_upload_data(self, verify_key: VerifyKey) -> bool:\n try:\n user = self.get_user(verify_key)\n return user.role.get(\"can_upload_data\", False)\n except UserNotFoundError:\n return False",
"def can_upload_video(self):\n return self.userprofile.user.has_perm('distance_learning.add_video')",
"def test_local_uploader_upload_fails(self, mock):\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as there is an exception\")\r\n assert res is False, err_msg",
"def test_local_uploader_upload_correct_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return True, \\\r\n as this extension is allowed\")\r\n assert res is True, err_msg",
"def isValidUpload(request, max_upload_size, video_types, space_used):\n if 'file' in request.FILES:\n if request.FILES['file'].size < max_upload_size and supportedType(request, video_types) and enoughSpace(request, space_used):\n return True\n return False",
"def test_volumes_post(self):\n pass",
"def show_uploadbox(self):\n\n manager = getMultiAdapter((self.context, self.context.REQUEST),\n ICheckinCheckoutManager)\n\n return manager.is_file_upload_allowed()",
"def image_is_available(filename):\n # FIXME - Implement!\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n return os.path.isfile(file_path)",
"def creation_validation(**_):\n\n for property_key in constants.VOLUME_REQUIRED_PROPERTIES:\n utils.validate_node_property(property_key, ctx.node.properties)\n\n volume_object = _get_volumes_from_id(utils.get_resource_id())\n\n if ctx.node.properties['use_external_resource'] and not volume_object:\n raise NonRecoverableError(\n 'External resource, but the supplied '\n 'EBS volume does not exist in the account.')\n\n if not ctx.node.properties['use_external_resource'] and volume_object:\n raise NonRecoverableError(\n 'Not external resource, but the supplied '\n 'EBS volume exists in the account.')",
"def test_valid_upload_modes(self):\n upload_helpers.verify_upload_mode(MODE_DEFAULT)\n upload_helpers.verify_upload_mode(MODE_FAST5)\n upload_helpers.verify_upload_mode(MODE_ASSEMBLIES)",
"def hasImageData(self,volumeNode):\n if not volumeNode:\n logging.debug('hasImageData failed: no volume node')\n return False\n return True",
"def __check_picture(self):\n if self.communications.get_picture():\n self.communications.set_status(\"Taking Picture\")\n try:\n if self.__video_status[\"in_use\"]:\n self.__live_video_stream(False) # Turn off Video Live Stream\n sleep(13)\n image_path = self.camera.take_picture(3)\n self.__live_video_stream(True) # Turn Video Stream back on.\n else:\n image_path = self.camera.take_picture(3)\n if image_path is not None:\n image_url = self.communications.upload_image(image_path)\n if image_url:\n self.communications.add_event(\"Image Capture Successful\", \"success\")\n except ValueError:\n self.communications.add_event(\"Could not capture image, try again\", \"error\")",
"def check_volume(obj, char, quiet=False):\n vol = obj.item_data.size\n if vol is None:\n raise ValueError(f\"Object {obj} has an undefined size\")\n v_max = char.item_data.capacity\n if char.used_capacity + vol > v_max:\n if not quiet:\n char.msg(\"You can't carry %s.\" % obj)\n return False\n return True",
"def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)",
"def validate(self):\n\n return self.volume",
"def media_image_remotely_accessible(self) -> bool:\n return True",
"def check_volume(vol, srf_names, verbose=True):\n if verbose:\n print(\" checking volume for validity\")\n print(\" ----------------------------\")\n # check if dict\n if not isinstance(vol, dict):\n if verbose:\n print(\" vol is not of type dict\")\n return False\n # check for keys in gli dict\n in_vol_keys = set(vol)\n if in_vol_keys == VOL_KEYS:\n if verbose:\n print(\" vol keys are valid\")\n else:\n if verbose:\n print(\" vol keys are not valid!\")\n print(\" needs: \" + \" \".join(VOL_KEYS))\n print(\" found: \" + \" \".join(map(str, in_vol_keys)))\n print(\" missing: \" + \" \".join(map(str, VOL_KEYS - in_vol_keys)))\n print(\" corrupted: \" + \" \".join(map(str, in_vol_keys - VOL_KEYS)))\n return False\n # check NAME\n if (\n isinstance(vol[\"NAME\"], STRTYPE)\n and not has_whitespaces(vol[\"NAME\"])\n and len(vol[\"NAME\"]) > 0\n ):\n if verbose:\n print(\" vol['NAME'] valid\")\n print(\" NAME: '\" + vol[\"NAME\"] + \"'\")\n else:\n if verbose:\n print(\" vol['NAME'] not valid\")\n return False\n # check SURFACES\n if (\n isinstance(vol[\"SURFACES\"], list)\n and len(vol[\"SURFACES\"]) > 0\n and all(srf in srf_names for srf in vol[\"SURFACES\"])\n ):\n if verbose:\n print(\" vol['SURFACES'] valid\")\n else:\n if verbose:\n print(\" vol['SURFACES'] not valid\")\n return False\n # check TYPE\n if vol[\"TYPE\"] is None or isinstance(vol[\"TYPE\"], int):\n if verbose:\n print(\" vol['TYPE'] valid\")\n else:\n if verbose:\n print(\" vol['TYPE'] not valid\")\n return False\n # check MAT_GROUP\n if vol[\"MAT_GROUP\"] is None or isinstance(vol[\"MAT_GROUP\"], int):\n if verbose:\n print(\" vol['MAT_GROUP'] valid\")\n else:\n if verbose:\n print(\" vol['MAT_GROUP'] not valid\")\n return False\n # check LAYER\n if vol[\"LAYER\"] is None or isinstance(vol[\"LAYER\"], int):\n if verbose:\n print(\" vol['LAYER'] valid\")\n else:\n if verbose:\n print(\" vol['LAYER'] not valid\")\n return False\n # finally\n if verbose:\n print(\" ------------\")\n print(\" vol is valid\")\n print(\"\")\n return True",
"def _upload_file(self, file, container):\r\n try:\r\n filename = secure_filename(file.filename)\r\n if not os.path.isdir(os.path.join(self.upload_folder, container)):\r\n os.makedirs(os.path.join(self.upload_folder, container))\r\n file.save(os.path.join(self.upload_folder, container, filename))\r\n return True\r\n except:\r\n return False",
"def upload_validated(request):\n if 'file' not in request.files:\n flash('No file part')\n return False \n if not request.form.get('username', None):\n flash('No username part')\n return False \n torrent_file = request.files['file']\n if torrent_file.filename == '':\n flash('No selected file')\n return False \n if torrent_file and check_allowed_extension(torrent_file.filename):\n return True",
"def test_aws_service_api_volume_attachment_put(self):\n pass",
"def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )",
"def runRequirements(self):\n ready = (self.user[\"Save\"] != \"\" and self.user[\"Video\"] != \"\") or self.img_exist\n return ready",
"def ensure_volume_exists( user_email, opencloud_volume, user=None ):\n client = connect_syndicate()\n\n try:\n volume = client.read_volume( opencloud_volume.name )\n except Exception, e:\n # transport error \n logger.exception(e)\n raise e\n\n if volume is None:\n # the volume does not exist....try to create it \n vol_name = opencloud_volume.name\n vol_blocksize = opencloud_volume.blocksize\n vol_description = opencloud_volume.description\n vol_private = opencloud_volume.private\n vol_archive = opencloud_volume.archive \n vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )\n\n try:\n vol_info = client.create_volume( user_email, vol_name, vol_description, vol_blocksize,\n private=vol_private,\n archive=vol_archive,\n active=True,\n default_gateway_caps=vol_default_gateway_caps,\n store_private_key=False,\n metadata_private_key=\"MAKE_METADATA_KEY\" )\n\n except Exception, e:\n # transport error\n logger.exception(e)\n raise e\n\n else:\n # successfully created the volume!\n return vol_info\n\n else:\n \n # volume already exists. Verify its owned by this user.\n if user is None:\n try:\n user = client.read_user( volume['owner_id'] )\n except Exception, e:\n # transport error, or user doesn't exist (either is unacceptable)\n logger.exception(e)\n raise e\n\n if user is None or user['email'] != user_email:\n raise Exception(\"Volume '%s' already exists, but is NOT owned by '%s'\" % (opencloud_volume.name, user_email) )\n\n # we're good!\n return None",
"def verify(image_path):\n try:\n with Image.open(image_path) as img:\n img.verify()\n return True\n except Exception as e:\n log.warn('Path [{}] does not point to an image: [{}]'.format(image_path, e))\n return False",
"def test_aws_service_api_volumes_post(self):\n pass",
"def test_attachment_deletion_allowed_volume_no_attachments(self):\n volume = tests_utils.create_volume(self.context)\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)",
"def verify_event(event):\n event_details = event['event']\n file_subtype = event_details.get('subtype')\n\n if file_subtype != 'file_share':\n print('Not a file_shared event- ignoring event...')\n return False\n\n file_details = event_details['file']\n mime_type = file_details['mimetype']\n file_size = file_details['size']\n\n if mime_type not in SUPPORTED_IMAGE_FORMATS:\n print('File is not an image- ignoring event...')\n return False\n\n if file_size > MAX_SIZE:\n print(\n 'Image is larger than 5MB and cannot be processed- ignoring event...')\n return False\n\n return True",
"def test_create_volume_blocked(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n self._return_blocked = 1 # Block & fail cancel => create succeeded\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)\n self.assertTrue(self._request_cancel)",
"def test_auth_allow(self):\n # Setting authentication on volume for client1 using ip\n auth_dict = {'all': [self.mounts[0].client_system]}\n ret = set_auth_allow(self.volname, self.mnode, auth_dict)\n self.assertTrue(ret, \"Failed to set authentication\")\n g.log.info(\"Successfully set authentication on volume\")\n\n # Mounting volume on client1\n self.authenticated_mount(self.mounts[0])\n\n # Trying to mount volume on client2\n self.unauthenticated_mount(self.mounts[1])\n\n # Verify whether mount failure on client2 is due to auth error\n log_msg = self.is_auth_failure(self.mounts[1].client_system)\n prev_log_statement = log_msg\n\n g.log.info(\"Verification of auth.allow on volume using client IP is \"\n \"successful\")\n\n # Unmount volume from client1\n ret = self.mounts[0].unmount()\n self.assertTrue(ret, (\"Failed to unmount volume %s from client %s\"\n % (self.volname, self.mounts[0].client_system)))\n\n # Obtain hostname of client1\n ret, hostname_client1, _ = g.run(self.mounts[0].client_system,\n \"hostname\")\n self.assertEqual(ret, 0, (\"Failed to obtain hostname of client %s\"\n % self.mounts[0].client_system))\n g.log.info(\"Obtained hostname of client. IP- %s, hostname- %s\",\n self.mounts[0].client_system, hostname_client1.strip())\n\n # Setting authentication on volume for client1 using hostname\n auth_dict = {'all': [hostname_client1.strip()]}\n ret = set_auth_allow(self.volname, self.mnode, auth_dict)\n self.assertTrue(ret, \"Failed to set authentication\")\n g.log.info(\"Successfully set authentication on volume\")\n\n # Mounting volume on client1\n self.authenticated_mount(self.mounts[0])\n\n # Trying to mount volume on client2\n self.unauthenticated_mount(self.mounts[1])\n\n # Verify whether mount failure on client2 is due to auth error\n log_msg = self.is_auth_failure(self.mounts[1].client_system,\n prev_log_statement)\n prev_log_statement = log_msg\n\n g.log.info(\"Verification of auth.allow on volume using client \"\n \"hostname is successful\")\n\n # Unmount volume from client1\n ret = self.mounts[0].unmount()\n self.assertTrue(ret, (\"Failed to unmount volume %s from client %s\"\n % (self.volname, self.mounts[0].client_system)))"
]
| [
"0.6100675",
"0.60439867",
"0.5963752",
"0.58335286",
"0.5831634",
"0.5797141",
"0.5795935",
"0.57759506",
"0.5682973",
"0.56629056",
"0.56443405",
"0.560292",
"0.56017274",
"0.5593478",
"0.5586045",
"0.5571096",
"0.55482906",
"0.54941136",
"0.54915893",
"0.5468858",
"0.5466412",
"0.5463334",
"0.5460502",
"0.5454481",
"0.5446574",
"0.5442021",
"0.5423069",
"0.5409134",
"0.5394888",
"0.5371835"
]
| 0.60787594 | 1 |
Verify that admin can launch volume as instance. | def test_launch_volume_as_instance(self, volume, instances_steps,
volumes_steps):
instance_name = next(generate_ids('instance'))
volumes_steps.launch_volume_as_instance(
volume.name, instance_name, network_name=INTERNAL_NETWORK_NAME)
instances_steps.page_instances().table_instances.row(
name=instance_name).wait_for_status('Active')
instances_steps.delete_instance(instance_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()"
]
| [
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.62880164",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075",
"0.6120075"
]
| 0.693913 | 0 |
Verify that admin can change volume status. | def test_change_volume_status(self, volume, volumes_steps):
volumes_steps.change_volume_status(volume.name, 'Error')
volumes_steps.change_volume_status(volume.name, 'Available') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _status(self, expected_status_code):\n status = cinder_utils.get_volume_status(self._cinder, self.__volume)\n if not status:\n logger.warning(\n 'Cannot volume status for volume with ID - %s',\n self.__volume.id)\n return False\n\n if status == 'ERROR':\n raise VolumeCreationError(\n 'Instance had an error during deployment')\n logger.debug('Instance status is - ' + status)\n return status == expected_status_code",
"def test_retype_setup_fail_volume_is_available(self, mock_notify):\n elevated = context.get_admin_context()\n project_id = self.context.project_id\n\n db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})\n old_vol_type = db.volume_type_get_by_name(elevated, 'old')\n db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})\n new_vol_type = db.volume_type_get_by_name(elevated, 'new')\n db.quota_create(elevated, project_id, 'volumes_new', 0)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host, status='available',\n volume_type_id=old_vol_type['id'])\n\n api = cinder.volume.api.API()\n self.assertRaises(exception.VolumeLimitExceeded, api.retype,\n self.context, volume, new_vol_type['id'])\n\n volume = db.volume_get(elevated, volume.id)\n # FIXME: restore when Bug #1803648 is figured out\n # mock_notify.assert_not_called()\n self.assertEqual('available', volume['status'])",
"def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1",
"def vol_up_and_validate(self):\n self.logger.info('Increasing volume')\n before_vol = self.dut.volume('Up', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Up', 1)\n if not after_vol or not before_vol or after_vol <= before_vol:\n self.logger.error(\n 'Unable to increase the volume. Before: %s. After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error increasing volume')",
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def volume_state(self):\r\n return self.status",
"def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))",
"def status(self):\n if not self.volume:\n # no volume active\n status = volume_status.NONE\n elif self._status and self._last_status_check >= time.time() - MIN_TIME_BETWEEN_STATUS_CHECKS:\n status = self._status\n else:\n try:\n self.volume.update()\n # Take only the first word of the status as openstack adds some extra info\n # after a space\n status = volume_status_map.get(self.volume.status.split(' ')[0], None)\n if status == volume_status.IN_USE and self.volume.attachment_state() == 'attached':\n status = volume_status.ATTACHED\n if not status:\n log.error(\"Unknown volume status: {0}. Setting status to volume_status.NONE\"\n .format(self.volume.status))\n status = volume_status.NONE\n self._status = status\n self._last_status_check = time.time()\n except EC2ResponseError as e:\n log.error(\n 'Cannot retrieve status of current volume. {0}'.format(e))\n status = volume_status.NONE\n return status",
"def test_check_status_admin(self):\n post_json = {\"submission_id\": self.status_check_submission_id}\n # Log in as admin user\n self.login_admin_user()\n # Call check status route (also checking case insensitivity of header here)\n response = self.app.post_json(\"/v1/check_status/\", post_json, expect_errors=True,\n headers={\"x-SESSION-id\": self.session_id})\n # Assert 200 status\n self.assertEqual(response.status_code, 200)",
"def _volume_status_check(self, expected_status_code, block, timeout,\n poll_interval):\n # sleep and wait for volume status change\n if block:\n start = time.time()\n else:\n start = time.time() - timeout + 1\n\n while timeout > time.time() - start:\n status = self._status(expected_status_code)\n if status:\n logger.debug('Volume is active with name - %s',\n self.volume_settings.name)\n return True\n\n logger.debug('Retry querying volume status in %s seconds',\n str(poll_interval))\n time.sleep(poll_interval)\n logger.debug('Volume status query timeout in %s',\n str(timeout - (time.time() - start)))\n\n logger.error(\n 'Timeout checking for volume status for ' + expected_status_code)\n return False",
"def _get_admin_status(self):\n return self.__admin_status",
"def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()",
"def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status",
"def test_volume_service(self):\n self.assertIsInstance(ChangeStateScript()._deployer._volume_service,\n VolumeService)",
"def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)",
"def check_status(self):",
"def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")",
"def vol_down_and_validate(self):\n self.logger.info('Decreasing volume')\n before_vol = self.dut.volume('Down', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Down', 1)\n if not after_vol or not before_vol or after_vol >= before_vol:\n self.logger.error(\n 'Unable to decrease the volume. Before: %s. After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error decreasing volume')",
"def check_admin(self, update, context):\n\n user = self.User(update)\n output = self.data_base.check_admin(user)\n user.send_message(output)\n self.data_base.log(user, update.message.text, str(output))",
"def test_migrate_volume_driver(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n\n volume = tests_utils.create_volume(ctxt=self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume = objects.Volume.get_by_id(context.get_admin_context(),\n volume.id)\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)",
"def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False",
"def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"",
"def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)",
"def is_admin(self):\n return False",
"def admin(self):\n if self.is_admin:\n return True\n return False",
"def assess_status(self):\n if not self.configuration_complete():\n hookenv.status_set('blocked',\n 'Kerberos configuration incomplete')\n elif os_utils.is_unit_upgrading_set():\n hookenv.status_set('blocked',\n 'Ready for do-release-upgrade and reboot. '\n 'Set complete when finished.')\n else:\n hookenv.status_set('active',\n 'Unit is ready')",
"def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)"
]
| [
"0.633983",
"0.6286603",
"0.6239554",
"0.6180753",
"0.6155955",
"0.6117248",
"0.6067968",
"0.6067968",
"0.6054155",
"0.60433626",
"0.60228795",
"0.59958816",
"0.59942013",
"0.59775287",
"0.59159106",
"0.5823149",
"0.5788118",
"0.57730633",
"0.5766912",
"0.5748985",
"0.572745",
"0.5726491",
"0.57196754",
"0.56916195",
"0.5681746",
"0.5670001",
"0.5665015",
"0.56626695",
"0.56579125",
"0.5643769"
]
| 0.7535339 | 0 |
Verify that admin can manage volume attachments. | def test_manage_volume_attachments(self, volume, instance, volumes_steps):
volumes_steps.attach_instance(volume.name, instance.name)
volumes_steps.detach_instance(volume.name, instance.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_attachment_deletion_allowed_volume_no_attachments(self):\n volume = tests_utils.create_volume(self.context)\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)",
"def test_attachment_deletion_allowed_attachment_from_volume(\n self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n volume.id)",
"def test_attachment_create_readonly_volume(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.volume_api.update_readonly_flag(self.context, vref, True)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('ro', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)",
"def test_aws_service_api_volume_attachment_delete(self):\n pass",
"def can_manage(self, filename):\n return False",
"def test_aws_service_api_volume_attachment_put(self):\n pass",
"def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])",
"def test_attachment_deletion_allowed_no_attachment(self):\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None)",
"def test_attachment_deletion_allowed_vm_not_found(self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n fake.VOLUME_ID)",
"def test_attachment_deletion_allowed_multiple_attachment(self):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment, attachment])\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None, volume)",
"def test_attach_attached_volume_to_same_server(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.attach_volume, server, volume)",
"def test_attachment_update_volume_in_error_state(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)\n vref.status = 'error'\n vref.save()\n connector = {'fake': 'connector',\n 'host': 'somehost'}\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_update,\n self.context,\n aref,\n connector)",
"def test_01_portal_attachment(self):\n\n self.authenticate(None, None)\n\n # Test public user can't create attachment without token of document\n res = self.url_open(\n url='%s/portal/attachment/add' % self.base_url,\n data={\n 'name': \"new attachment\",\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'csrf_token': http.WebRequest.csrf_token(self),\n },\n files=[('file', ('test.txt', b'test', 'plain/text'))],\n )\n self.assertEqual(res.status_code, 400)\n self.assertIn(\"you do not have the rights\", res.text)\n\n # Test public user can create attachment with token\n res = self.url_open(\n url='%s/portal/attachment/add' % self.base_url,\n data={\n 'name': \"new attachment\",\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'csrf_token': http.WebRequest.csrf_token(self),\n 'access_token': self.out_invoice._portal_ensure_token(),\n },\n files=[('file', ('test.txt', b'test', 'plain/text'))],\n )\n self.assertEqual(res.status_code, 200)\n create_res = json.loads(res.content.decode('utf-8'))\n self.assertTrue(self.env['ir.attachment'].sudo().search([('id', '=', create_res['id'])]))\n\n # Test created attachment is private\n res_binary = self.url_open('/web/content/%d' % create_res['id'])\n self.assertEqual(res_binary.status_code, 404)\n\n # Test created access_token is working\n res_binary = self.url_open('/web/content/%d?access_token=%s' % (create_res['id'], create_res['access_token']))\n self.assertEqual(res_binary.status_code, 200)\n\n # Test mimetype is neutered as non-admin\n res = self.url_open(\n url='%s/portal/attachment/add' % self.base_url,\n data={\n 'name': \"new attachment\",\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'csrf_token': http.WebRequest.csrf_token(self),\n 'access_token': self.out_invoice._portal_ensure_token(),\n },\n files=[('file', ('test.svg', b'<svg></svg>', 'image/svg+xml'))],\n )\n self.assertEqual(res.status_code, 200)\n create_res = json.loads(res.content.decode('utf-8'))\n self.assertEqual(create_res['mimetype'], 'text/plain')\n\n res_binary = self.url_open('/web/content/%d?access_token=%s' % (create_res['id'], create_res['access_token']))\n self.assertEqual(res_binary.headers['Content-Type'], 'text/plain')\n self.assertEqual(res_binary.content, b'<svg></svg>')\n\n res_image = self.url_open('/web/image/%d?access_token=%s' % (create_res['id'], create_res['access_token']))\n self.assertEqual(res_image.headers['Content-Type'], 'text/plain')\n self.assertEqual(res_image.content, b'<svg></svg>')\n\n # Test attachment can't be removed without valid token\n res = self.opener.post(\n url='%s/portal/attachment/remove' % self.base_url,\n json={\n 'params': {\n 'attachment_id': create_res['id'],\n 'access_token': \"wrong\",\n },\n },\n )\n self.assertEqual(res.status_code, 200)\n self.assertTrue(self.env['ir.attachment'].sudo().search([('id', '=', create_res['id'])]))\n self.assertIn(\"you do not have the rights\", res.text)\n\n # Test attachment can be removed with token if \"pending\" state\n res = self.opener.post(\n url='%s/portal/attachment/remove' % self.base_url,\n json={\n 'params': {\n 'attachment_id': create_res['id'],\n 'access_token': create_res['access_token'],\n },\n },\n )\n self.assertEqual(res.status_code, 200)\n remove_res = json.loads(res.content.decode('utf-8'))['result']\n self.assertFalse(self.env['ir.attachment'].sudo().search([('id', '=', create_res['id'])]))\n self.assertTrue(remove_res is True)\n\n # Test attachment can't be removed if not \"pending\" state\n 
attachment = self.env['ir.attachment'].create({\n 'name': 'an attachment',\n 'access_token': self.env['ir.attachment']._generate_access_token(),\n })\n res = self.opener.post(\n url='%s/portal/attachment/remove' % self.base_url,\n json={\n 'params': {\n 'attachment_id': attachment.id,\n 'access_token': attachment.access_token,\n },\n },\n )\n self.assertEqual(res.status_code, 200)\n self.assertTrue(self.env['ir.attachment'].sudo().search([('id', '=', attachment.id)]))\n self.assertIn(\"not in a pending state\", res.text)\n\n # Test attachment can't be removed if attached to a message\n attachment.write({\n 'res_model': 'mail.compose.message',\n 'res_id': 0,\n })\n attachment.flush()\n message = self.env['mail.message'].create({\n 'attachment_ids': [(6, 0, attachment.ids)],\n })\n res = self.opener.post(\n url='%s/portal/attachment/remove' % self.base_url,\n json={\n 'params': {\n 'attachment_id': attachment.id,\n 'access_token': attachment.access_token,\n },\n },\n )\n self.assertEqual(res.status_code, 200)\n self.assertTrue(attachment.exists())\n self.assertIn(\"it is linked to a message\", res.text)\n message.sudo().unlink()\n\n # Test attachment can't be associated if no attachment token.\n res = self.url_open(\n url='%s/mail/chatter_post' % self.base_url,\n data={\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'message': \"test message 1\",\n 'attachment_ids': attachment.id,\n 'attachment_tokens': 'false',\n 'csrf_token': http.WebRequest.csrf_token(self),\n },\n )\n self.assertEqual(res.status_code, 400)\n self.assertIn(\"The attachment %s does not exist or you do not have the rights to access it.\" % attachment.id, res.text)\n\n # Test attachment can't be associated if no main document token\n res = self.url_open(\n url='%s/mail/chatter_post' % self.base_url,\n data={\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'message': \"test message 1\",\n 'attachment_ids': attachment.id,\n 'attachment_tokens': attachment.access_token,\n 'csrf_token': http.WebRequest.csrf_token(self),\n },\n )\n self.assertEqual(res.status_code, 403)\n self.assertIn(\"You are not allowed to access 'Journal Entry' (account.move) records.\", res.text)\n\n # Test attachment can't be associated if not \"pending\" state\n self.assertFalse(self.out_invoice.message_ids)\n attachment.write({'res_model': 'model'})\n res = self.url_open(\n url='%s/mail/chatter_post' % self.base_url,\n data={\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'message': \"test message 1\",\n 'attachment_ids': attachment.id,\n 'attachment_tokens': attachment.access_token,\n 'csrf_token': http.WebRequest.csrf_token(self),\n 'token': self.out_invoice._portal_ensure_token(),\n },\n )\n self.assertEqual(res.status_code, 200)\n self.out_invoice.invalidate_cache(fnames=['message_ids'], ids=self.out_invoice.ids)\n self.assertEqual(len(self.out_invoice.message_ids), 1)\n self.assertEqual(self.out_invoice.message_ids.body, \"<p>test message 1</p>\")\n self.assertFalse(self.out_invoice.message_ids.attachment_ids)\n\n # Test attachment can't be associated if not correct user\n attachment.write({'res_model': 'mail.compose.message'})\n res = self.url_open(\n url='%s/mail/chatter_post' % self.base_url,\n data={\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'message': \"test message 2\",\n 'attachment_ids': attachment.id,\n 'attachment_tokens': attachment.access_token,\n 'csrf_token': http.WebRequest.csrf_token(self),\n 'token': 
self.out_invoice._portal_ensure_token(),\n },\n )\n self.assertEqual(res.status_code, 200)\n self.out_invoice.invalidate_cache(fnames=['message_ids'], ids=self.out_invoice.ids)\n self.assertEqual(len(self.out_invoice.message_ids), 2)\n self.assertEqual(self.out_invoice.message_ids[0].body, \"<p>test message 2</p>\")\n self.assertFalse(self.out_invoice.message_ids.attachment_ids)\n\n # Test attachment can be associated if all good (complete flow)\n res = self.url_open(\n url='%s/portal/attachment/add' % self.base_url,\n data={\n 'name': \"final attachment\",\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'csrf_token': http.WebRequest.csrf_token(self),\n 'access_token': self.out_invoice._portal_ensure_token(),\n },\n files=[('file', ('test.txt', b'test', 'plain/text'))],\n )\n self.assertEqual(res.status_code, 200)\n create_res = json.loads(res.content.decode('utf-8'))\n self.assertEqual(create_res['name'], \"final attachment\")\n\n res = self.url_open(\n url='%s/mail/chatter_post' % self.base_url,\n data={\n 'res_model': self.out_invoice._name,\n 'res_id': self.out_invoice.id,\n 'message': \"test message 3\",\n 'attachment_ids': create_res['id'],\n 'attachment_tokens': create_res['access_token'],\n 'csrf_token': http.WebRequest.csrf_token(self),\n 'token': self.out_invoice._portal_ensure_token(),\n },\n )\n self.assertEqual(res.status_code, 200)\n self.out_invoice.invalidate_cache(fnames=['message_ids'], ids=self.out_invoice.ids)\n self.assertEqual(len(self.out_invoice.message_ids), 3)\n self.assertEqual(self.out_invoice.message_ids[0].body, \"<p>test message 3</p>\")\n self.assertEqual(len(self.out_invoice.message_ids[0].attachment_ids), 1)",
"def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')",
"def test_attachment_create_volume_in_error_state(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n vref.status = \"error\"\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID2)",
"def ensure_volume_access_right_exists( user_email, volume_name, caps, allowed_gateways=[msconfig.GATEWAY_TYPE_UG] ):\n client = connect_syndicate()\n return syndicate_provisioning.ensure_volume_access_right_exists( client, user_email, volume_name, caps, allowed_gateways )",
"def test_retype_setup_fail_volume_is_available(self, mock_notify):\n elevated = context.get_admin_context()\n project_id = self.context.project_id\n\n db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})\n old_vol_type = db.volume_type_get_by_name(elevated, 'old')\n db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})\n new_vol_type = db.volume_type_get_by_name(elevated, 'new')\n db.quota_create(elevated, project_id, 'volumes_new', 0)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host, status='available',\n volume_type_id=old_vol_type['id'])\n\n api = cinder.volume.api.API()\n self.assertRaises(exception.VolumeLimitExceeded, api.retype,\n self.context, volume, new_vol_type['id'])\n\n volume = db.volume_get(elevated, volume.id)\n # FIXME: restore when Bug #1803648 is figured out\n # mock_notify.assert_not_called()\n self.assertEqual('available', volume['status'])",
"def test_attachment_create_creating_volume(self):\n volume_params = {'status': 'creating'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)",
"def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)",
"def test_attach_attached_volume_to_different_server(self):\n server1, validation_resources = self._create_server()\n volume = self.create_volume()\n\n self.attach_volume(server1, volume)\n\n # Create server2 and attach in-use volume\n server2, validation_resources = self._create_server()\n self.assertRaises(lib_exc.BadRequest,\n self.attach_volume, server2, volume)",
"def test_attachment_deletion_allowed_service_call(self, mock_get_server):\n self.context.service_roles = ['reader', 'service']\n attachment = self._get_attachment()\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()",
"def test_reserve_reserve_delete(self, mock_allowed):\n mock_allowed.return_value = None\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.volume_api.attachment_delete(self.context,\n aref)\n mock_allowed.assert_called_once_with(self.context, aref)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.assertEqual(1, len(vref.volume_attachment))",
"def test_additional_attachment_create_no_connector(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('null', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)\n\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual(2, len(vref.volume_attachment))",
"def test_attach_volume_ignore_VolumeAttachmentNotFound(\n self, mock_notify, mock_elevate, mock_event, mock_debug_log):\n mock_elevate.return_value = self.context\n\n attachment_id = uuids.attachment_id\n fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)\n fake_bdm.attachment_id = attachment_id\n instance = self._create_fake_instance_obj()\n expected_exception = test.TestingException()\n\n def fake_attach(*args, **kwargs):\n raise expected_exception\n\n with test.nested(\n mock.patch.object(driver_block_device.DriverVolumeBlockDevice,\n 'attach'),\n mock.patch.object(cinder.API, 'attachment_delete'),\n mock.patch.object(objects.BlockDeviceMapping,\n 'destroy')\n ) as (mock_attach, mock_attach_delete, mock_destroy):\n mock_attach.side_effect = fake_attach\n mock_attach_delete.side_effect = \\\n exception.VolumeAttachmentNotFound(\n attachment_id=attachment_id)\n self.assertRaises(\n test.TestingException, self.compute.attach_volume,\n self.context, instance, fake_bdm)\n mock_destroy.assert_called_once_with()\n mock_notify.assert_has_calls([\n mock.call(self.context, instance, 'fake-mini',\n action='volume_attach', phase='start',\n volume_id=uuids.volume_id),\n mock.call(self.context, instance, 'fake-mini',\n action='volume_attach', phase='error',\n volume_id=uuids.volume_id,\n exception=expected_exception),\n ])\n mock_event.assert_called_once_with(\n self.context, 'compute_attach_volume', CONF.host,\n instance.uuid, graceful_exit=False)\n self.assertIsInstance(mock_debug_log.call_args[0][1],\n exception.VolumeAttachmentNotFound)",
"def _attach_volume(self):\n return []",
"def test_attachment_deletion_allowed_mismatched_volume_and_attach_id(\n self, mock_get_attatchment):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n attachment2 = self._get_attachment()\n attachment2.volume_id = attachment.volume.id = fake.VOLUME2_ID\n self.assertRaises(exception.InvalidInput,\n self.volume_api.attachment_deletion_allowed,\n self.context, attachment2.id, volume)\n mock_get_attatchment.assert_called_once_with(self.context,\n attachment2.id)",
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def test_attachment_create_no_connector(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('null', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)",
"def test_attachment_deletion_allowed_no_instance(self, mock_get_server):\n attachment = self._get_attachment(with_instance_id=False)\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()",
"def test_admin_upload(cidc_api, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n mocks = UploadMocks(\n monkeypatch,\n prismify_extra=PBMC_PATCH,\n )\n\n client = cidc_api.test_client()\n\n res = client.post(\n MANIFEST_UPLOAD,\n data=form_data(\n \"pbmc.xlsx\",\n io.BytesIO(b\"a\"),\n \"pbmc\",\n ),\n )\n assert res.status_code == 200\n\n res = client.post(\n ASSAY_UPLOAD,\n data=form_data(\n \"wes.xlsx\",\n io.BytesIO(b\"1234\"),\n \"wes_fastq\",\n ),\n )\n assert res.status_code == 200"
]
| [
"0.6995282",
"0.65689677",
"0.63907975",
"0.62546706",
"0.6135261",
"0.6078998",
"0.60550547",
"0.5991479",
"0.59363604",
"0.5931643",
"0.5893291",
"0.5874586",
"0.5873932",
"0.58355445",
"0.5821134",
"0.5810227",
"0.5801993",
"0.5787049",
"0.57592803",
"0.5747946",
"0.5727795",
"0.5723162",
"0.56751484",
"0.56559074",
"0.56444293",
"0.56226623",
"0.55950373",
"0.556333",
"0.55514556",
"0.55364376"
]
| 0.67760146 | 1 |
Verify that volume can be transferred between users. | def test_transfer_volume(self, volume, auth_steps, volumes_steps):
transfer_name = next(generate_ids('transfer'))
transfer_id, transfer_key = volumes_steps.create_transfer(
volume.name, transfer_name)
auth_steps.logout()
auth_steps.login(USER_NAME, USER_PASSWD)
volumes_steps.accept_transfer(transfer_id, transfer_key, volume.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_volume_access_right_exists( user_email, volume_name, caps, allowed_gateways=[msconfig.GATEWAY_TYPE_UG] ):\n client = connect_syndicate()\n return syndicate_provisioning.ensure_volume_access_right_exists( client, user_email, volume_name, caps, allowed_gateways )",
"def test_least_busy_host_gets_volume(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)\n volume1.kill()\n volume2.kill()",
"def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )",
"def vol_up_and_validate(self):\n self.logger.info('Increasing volume')\n before_vol = self.dut.volume('Up', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Up', 1)\n if not after_vol or not before_vol or after_vol <= before_vol:\n self.logger.error(\n 'Unable to increase the volume. Before: %s. After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error increasing volume')",
"def test_mount_status_nas_share(self):\n pass",
"def check_volume(obj, char, quiet=False):\n vol = obj.item_data.size\n if vol is None:\n raise ValueError(f\"Object {obj} has an undefined size\")\n v_max = char.item_data.capacity\n if char.used_capacity + vol > v_max:\n if not quiet:\n char.msg(\"You can't carry %s.\" % obj)\n return False\n return True",
"def test_least_busy_host_gets_volume_no_queue(self):\n volume1 = service.Service('host1',\n 'nova-volume',\n 'volume',\n FLAGS.volume_manager)\n volume1.start()\n volume2 = service.Service('host2',\n 'nova-volume',\n 'volume',\n FLAGS.volume_manager)\n volume2.start()\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)",
"def test_auth_allow(self):\n # Setting authentication on volume for client1 using ip\n auth_dict = {'all': [self.mounts[0].client_system]}\n ret = set_auth_allow(self.volname, self.mnode, auth_dict)\n self.assertTrue(ret, \"Failed to set authentication\")\n g.log.info(\"Successfully set authentication on volume\")\n\n # Mounting volume on client1\n self.authenticated_mount(self.mounts[0])\n\n # Trying to mount volume on client2\n self.unauthenticated_mount(self.mounts[1])\n\n # Verify whether mount failure on client2 is due to auth error\n log_msg = self.is_auth_failure(self.mounts[1].client_system)\n prev_log_statement = log_msg\n\n g.log.info(\"Verification of auth.allow on volume using client IP is \"\n \"successful\")\n\n # Unmount volume from client1\n ret = self.mounts[0].unmount()\n self.assertTrue(ret, (\"Failed to unmount volume %s from client %s\"\n % (self.volname, self.mounts[0].client_system)))\n\n # Obtain hostname of client1\n ret, hostname_client1, _ = g.run(self.mounts[0].client_system,\n \"hostname\")\n self.assertEqual(ret, 0, (\"Failed to obtain hostname of client %s\"\n % self.mounts[0].client_system))\n g.log.info(\"Obtained hostname of client. IP- %s, hostname- %s\",\n self.mounts[0].client_system, hostname_client1.strip())\n\n # Setting authentication on volume for client1 using hostname\n auth_dict = {'all': [hostname_client1.strip()]}\n ret = set_auth_allow(self.volname, self.mnode, auth_dict)\n self.assertTrue(ret, \"Failed to set authentication\")\n g.log.info(\"Successfully set authentication on volume\")\n\n # Mounting volume on client1\n self.authenticated_mount(self.mounts[0])\n\n # Trying to mount volume on client2\n self.unauthenticated_mount(self.mounts[1])\n\n # Verify whether mount failure on client2 is due to auth error\n log_msg = self.is_auth_failure(self.mounts[1].client_system,\n prev_log_statement)\n prev_log_statement = log_msg\n\n g.log.info(\"Verification of auth.allow on volume using client \"\n \"hostname is successful\")\n\n # Unmount volume from client1\n ret = self.mounts[0].unmount()\n self.assertTrue(ret, (\"Failed to unmount volume %s from client %s\"\n % (self.volname, self.mounts[0].client_system)))",
"def ensure_volume_exists( user_email, opencloud_volume, user=None ):\n client = connect_syndicate()\n\n try:\n volume = client.read_volume( opencloud_volume.name )\n except Exception, e:\n # transport error \n logger.exception(e)\n raise e\n\n if volume is None:\n # the volume does not exist....try to create it \n vol_name = opencloud_volume.name\n vol_blocksize = opencloud_volume.blocksize\n vol_description = opencloud_volume.description\n vol_private = opencloud_volume.private\n vol_archive = opencloud_volume.archive \n vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )\n\n try:\n vol_info = client.create_volume( user_email, vol_name, vol_description, vol_blocksize,\n private=vol_private,\n archive=vol_archive,\n active=True,\n default_gateway_caps=vol_default_gateway_caps,\n store_private_key=False,\n metadata_private_key=\"MAKE_METADATA_KEY\" )\n\n except Exception, e:\n # transport error\n logger.exception(e)\n raise e\n\n else:\n # successfully created the volume!\n return vol_info\n\n else:\n \n # volume already exists. Verify its owned by this user.\n if user is None:\n try:\n user = client.read_user( volume['owner_id'] )\n except Exception, e:\n # transport error, or user doesn't exist (either is unacceptable)\n logger.exception(e)\n raise e\n\n if user is None or user['email'] != user_email:\n raise Exception(\"Volume '%s' already exists, but is NOT owned by '%s'\" % (opencloud_volume.name, user_email) )\n\n # we're good!\n return None",
"def check_free_space():\n subprocess.run([\"ssh\",backup_host, \"du -h\", dest ]])\n # get output",
"def test_mount_status_nas_share_by_nas(self):\n pass",
"def test_delete_destination_volume_in_migration(self):\n self._test_delete_volume_in_migration('target:vol-id')",
"def test_retype_setup_fail_volume_is_available(self, mock_notify):\n elevated = context.get_admin_context()\n project_id = self.context.project_id\n\n db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})\n old_vol_type = db.volume_type_get_by_name(elevated, 'old')\n db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})\n new_vol_type = db.volume_type_get_by_name(elevated, 'new')\n db.quota_create(elevated, project_id, 'volumes_new', 0)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host, status='available',\n volume_type_id=old_vol_type['id'])\n\n api = cinder.volume.api.API()\n self.assertRaises(exception.VolumeLimitExceeded, api.retype,\n self.context, volume, new_vol_type['id'])\n\n volume = db.volume_get(elevated, volume.id)\n # FIXME: restore when Bug #1803648 is figured out\n # mock_notify.assert_not_called()\n self.assertEqual('available', volume['status'])",
"def migrate_volume(self, ctxt, volume, host):\n LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',\n {'id': volume['id'], 'host': host})\n\n false_ret = (False, None)\n\n if volume['status'] not in ('available', 'retyping'):\n LOG.warning(\"Volume status must be 'available' or 'retyping'.\"\n \" Current volume status: %s\", volume['status'])\n return false_ret\n\n if 'capabilities' not in host:\n LOG.warning(\"Unsupported host. No capabilities found\")\n return false_ret\n\n capabilities = host['capabilities']\n ns_shares = capabilities['ns_shares']\n dst_parts = capabilities['location_info'].split(':')\n dst_host, dst_volume = dst_parts[1:]\n\n if (capabilities.get('vendor_name') != 'Nexenta' or\n dst_parts[0] != self.__class__.__name__ or\n capabilities['free_capacity_gb'] < volume['size']):\n return false_ret\n\n nms = self.share2nms[volume['provider_location']]\n ssh_bindings = nms.appliance.ssh_list_bindings()\n shares = []\n for bind in ssh_bindings:\n for share in ns_shares:\n if (share.startswith(ssh_bindings[bind][3]) and\n ns_shares[share] >= volume['size']):\n shares.append(share)\n if len(shares) == 0:\n LOG.warning(\"Remote NexentaStor appliance at %s should be \"\n \"SSH-bound.\", share)\n return false_ret\n share = sorted(shares, key=ns_shares.get, reverse=True)[0]\n snapshot = {\n 'volume_name': volume['name'],\n 'volume_id': volume['id'],\n 'name': utils.get_migrate_snapshot_name(volume)\n }\n self.create_snapshot(snapshot)\n location = volume['provider_location']\n src = '%(share)s/%(volume)s@%(snapshot)s' % {\n 'share': location.split(':')[1].split('volumes/')[1],\n 'volume': volume['name'],\n 'snapshot': snapshot['name']\n }\n dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])\n try:\n nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot send source snapshot %(src)s to \"\n \"destination %(dst)s. Reason: %(exc)s\",\n {'src': src, 'dst': dst, 'exc': exc})\n return false_ret\n finally:\n try:\n self.delete_snapshot(snapshot)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary source snapshot \"\n \"%(src)s on NexentaStor Appliance: %(exc)s\",\n {'src': src, 'exc': exc})\n try:\n self.delete_volume(volume)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete source volume %(volume)s on \"\n \"NexentaStor Appliance: %(exc)s\",\n {'volume': volume['name'], 'exc': exc})\n\n dst_nms = self._get_nms_for_url(capabilities['nms_url'])\n dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],\n volume['name'], snapshot['name'])\n try:\n dst_nms.snapshot.destroy(dst_snapshot, '')\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary destination snapshot \"\n \"%(dst)s on NexentaStor Appliance: %(exc)s\",\n {'dst': dst_snapshot, 'exc': exc})\n return True, {'provider_location': share}",
"def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])",
"def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)",
"def test_change_volume_status(self, volume, volumes_steps):\n volumes_steps.change_volume_status(volume.name, 'Error')\n volumes_steps.change_volume_status(volume.name, 'Available')",
"def test_subscriber_access_if_vsg2_goes_down(self):",
"def test_create_volume_blocked(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n self._return_blocked = 1 # Block & fail cancel => create succeeded\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)\n self.assertTrue(self._request_cancel)",
"def check_file_transferred(replica, location):\n\n from tardis.tardis_portal.models import Dataset_File\n datafile = Dataset_File.objects.get(pk=replica.datafile.id)\n\n # If the remote is capable, get it to send us the checksums and / or\n # file length for its copy of the file\n try:\n # Fetch the remote's metadata for the file\n m = location.provider.get_metadata(replica)\n _check_attribute(m, datafile.size, 'length')\n if (_check_attribute(m, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute(m, datafile.md5sum, 'md5sum')):\n return True\n if location.trust_length and \\\n _check_attribute(m, datafile.size, 'length') :\n return False\n raise MigrationError('Not enough metadata for verification')\n except NotImplementedError:\n pass\n except HTTPError as e:\n # Bad request means that the remote didn't recognize the query\n if e.code != 400:\n raise\n\n if location.provider.trust_length :\n try:\n length = location.provider.get_length(replica)\n if _check_attribute2(length, datafile.size, 'length'):\n return False\n except NotImplementedError:\n pass\n\n # Fetch back the remote file and verify it locally.\n f = location.provider.get_opener(replica)()\n md5sum, sha512sum, size, x = generate_file_checksums(f, None)\n _check_attribute2(str(size), datafile.size, 'length')\n if _check_attribute2(sha512sum, datafile.sha512sum, 'sha512sum') or \\\n _check_attribute2(md5sum, datafile.md5sum, 'md5sum'):\n return True\n raise MigrationError('Not enough metadata for file verification')",
"def test_delete_volume_failure_modes(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self._fail_space_delete = True\n # This should not throw an exception, space-delete failure not problem\n self.driver.delete_volume(volume)\n self._fail_space_delete = False\n volume['provider_id'] = None\n # This should also not throw an exception\n self.driver.delete_volume(volume)",
"def test_attach_attached_volume_to_different_server(self):\n server1, validation_resources = self._create_server()\n volume = self.create_volume()\n\n self.attach_volume(server1, volume)\n\n # Create server2 and attach in-use volume\n server2, validation_resources = self._create_server()\n self.assertRaises(lib_exc.BadRequest,\n self.attach_volume, server2, volume)",
"def test_subscriber_access_if_vsg1_goes_down(self):",
"async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True",
"def test_create_virtual_account_transfer(self):\n pass",
"def test_transfer(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=True)\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)\n assert token.call().balanceOf(shareholder1) == 0\n assert token.call().balanceOf(boogieman) == 4000",
"def test_migrate_volume_driver_for_retype(self, mock_can_use):\n # Mock driver and rpc functions\n mock_driver = self.mock_object(self.volume.driver, 'migrate_volume',\n return_value=(True, {}))\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False,\n fake.VOLUME_TYPE2_ID, mock.sentinel.diff)\n\n mock_can_use.assert_called_once_with(mock.sentinel.diff)\n mock_driver.assert_called_once_with(self.context, volume, host_obj)\n # check volume properties\n volume = objects.Volume.get_by_id(context.get_admin_context(),\n volume.id)\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(fake.VOLUME_TYPE2_ID, volume.volume_type_id)",
"def vol_down_and_validate(self):\n self.logger.info('Decreasing volume')\n before_vol = self.dut.volume('Down', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Down', 1)\n if not after_vol or not before_vol or after_vol >= before_vol:\n self.logger.error(\n 'Unable to decrease the volume. Before: %s. After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error decreasing volume')",
"def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()",
"def test_aws_service_api_volume_delete(self):\n pass"
]
| [
"0.6001911",
"0.59510225",
"0.59452003",
"0.5890787",
"0.5814993",
"0.58066094",
"0.5777035",
"0.5719183",
"0.5708301",
"0.5675022",
"0.56528234",
"0.5611603",
"0.5581504",
"0.55581987",
"0.5523851",
"0.54841036",
"0.5479376",
"0.54642123",
"0.5446505",
"0.5428832",
"0.5421429",
"0.54023707",
"0.5399043",
"0.53979933",
"0.5395437",
"0.53755265",
"0.5361127",
"0.5357371",
"0.53500086",
"0.534975"
]
| 0.69842476 | 0 |
Assigns networks to specified interface. | def assign_networks(cls, instance, networks):
instance.assigned_networks_list = networks
db().flush() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces",
"def attach_interface_to_namespace(node, namespace, interface):\n cmd = f\"ip link set {interface} netns {namespace}\"\n\n ret_code, _, stderr = exec_cmd(node, cmd, timeout=5, sudo=True)\n if ret_code != 0:\n raise RuntimeError(f\"Could not attach interface, reason:\\n{stderr}\")\n\n cmd = f\"ip netns exec {namespace} ip link set {interface} up\"\n\n ret_code, _, stderr = exec_cmd(node, cmd, timeout=5, sudo=True)\n if ret_code != 0:\n raise RuntimeError(\n f\"Could not set interface state, reason:\\n{stderr}\"\n )",
"def set_network(self, path, ip=\"\", netmask=\"255.255.255.0\", gateway=\"\"):\n\n with open(os.path.join(path, 'etc', 'network', 'interfaces'), 'w') \\\n as f:\n f.write(\"auto lo\\niface lo inet loopback\\n\\n\")\n\n if len(ip) <= 0:\n f.write(\"auto eth0\\niface eth0 inet dhcp\\n\")\n else:\n f.write(\"auto eth0\\niface eth0 inet static\\n\")\n f.write(\"\\taddress {0}\\n\\tnetmask {1}\\n\\tgateway {2}\\n\".\\\n format(ip, netmask, gateway))",
"def update_interfaces(self, interfaces):\n for i in interfaces:\n self.update_interface(i)",
"def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()",
"def moveIntf(intf, node):\n intf = str(intf)\n cmd = 'ip link set %s netns %s' % (intf, node.pid)\n result = node.rcmd(cmd)\n if result:\n raise Exception('error executing command %s' % cmd)\n return True",
"def setNetwork(self, network):\n # type: (str)->None\n\n self._validator.validate_one(\n 'network', VALID_OPTS['network'], network)\n self._ifAttributes['network'] = network",
"def test_configure_interfaces_of_several_nodes(self):\n # Go back to nodes page\n Tabs().nodes.click()\n # Add second node\n time.sleep(1)\n Nodes().add_nodes.click()\n Nodes().nodes_discovered[2].checkbox.click()\n RolesPanel().compute.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n Tabs().nodes.click()\n time.sleep(1)\n Nodes().add_nodes.click()\n Nodes().nodes_discovered[1].checkbox.click()\n RolesPanel().compute.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # rearrange interfaces\n with Nodes() as n:\n n.nodes[1].checkbox.click()\n n.nodes[2].checkbox.click()\n n.configure_interfaces.click()\n with InterfacesSettings() as s:\n ActionChains(browser.driver).drag_and_drop(\n s.interfaces[0].networks['management'],\n s.interfaces[1].networks_box).perform()\n ActionChains(browser.driver).drag_and_drop(\n s.interfaces[0].networks['storage'],\n s.interfaces[1].networks_box).perform()\n s.apply.click()\n time.sleep(1)\n\n for i in range(1, 3):\n # Go to nodes page\n Tabs().nodes.click()\n # Verify interfaces settings of each node\n Nodes().nodes[i].details.click()\n NodeInfo().edit_networks.click()\n self.assertIn(\n 'management', s.interfaces[1].networks,\n 'management at eht1. Node #{0}'.format(i))\n self.assertIn(\n 'storage', s.interfaces[1].networks,\n 'storage at eht1. Node #{0}'.format(i))",
"def SetWirelessInterface(self, interface):\n print \"setting wireless interface %s\" % (str(interface))\n self.wifi.wireless_interface = noneToBlankString(interface)\n self.wired.wireless_interface = noneToBlankString(interface)\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n config.set(\"Settings\",\"wireless_interface\", interface)\n configfile = open(self.app_conf, \"w\")\n config.write(configfile)",
"def add_interface(self, inf):\n self.interfaces[inf] = {'ip': 'unassigned', 'status': 'shutdown', 'connect': ['none', 'none']}",
"def set_interface(interface, name=''):\n if not interface:\n raise ValueError('interface is empty')\n\n global interfaces\n logger.debug('connection_name: \"{}\" -> {}.{}'.format(\n name,\n interface.__module__,\n interface.__class__.__name__\n ))\n interfaces[name] = interface",
"def add_network_interface(self, iface: 'NetworkInterface',\n is_gateway: bool = False):\n self.ifaces.append(iface)\n if is_gateway:\n self._gateway = iface",
"def add_router_interfaces(self):\n for subnet_name in self.router_data['properties']['networks'].keys():\n #print(subnet_name)\n interface = OrderedDict({\n str(self.router_name + '_interface_' + subnet_name): {\n 'type': 'OS::Neutron::RouterInterface',\n 'properties': {\n 'router_id': { 'get_resource': self.router_name },\n 'subnet_id': { 'get_resource': str(self.router_name + '_' + subnet_name) }\n } \n }\n })\n self.template['resources'].update(interface)",
"def set_interface(self, interface):\n if not interface_exists(interface):\n raise ValueError(f\"Interface {interface} is invalid.\")\n self.interface = interface",
"def set_network(self, network: str) -> None:\n return self.add_value(self._network_attribute, network)",
"def navigate_to_network_then_to_interfaces(driver):\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Network\"]').click()\n wait_on_element(driver, 0.5, 30, '//mat-list-item[@ix-auto=\"option__Interfaces\"]')\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Interfaces\"]').click()",
"def modify_network_interface_attribute(\n name=None,\n network_interface_id=None,\n attr=None,\n value=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not (name or network_interface_id):\n raise SaltInvocationError(\n \"Either name or network_interface_id must be provided.\"\n )\n if attr is None and value is None:\n raise SaltInvocationError(\"attr and value must be provided.\")\n r = {}\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n result = _get_network_interface(conn, name, network_interface_id)\n if \"error\" in result:\n return result\n eni = result[\"result\"]\n info = _describe_network_interface(eni)\n network_interface_id = info[\"id\"]\n # munge attr into what the API requires\n if attr == \"groups\":\n _attr = \"groupSet\"\n elif attr == \"source_dest_check\":\n _attr = \"sourceDestCheck\"\n elif attr == \"delete_on_termination\":\n _attr = \"deleteOnTermination\"\n else:\n _attr = attr\n _value = value\n if info.get(\"vpc_id\") and _attr == \"groupSet\":\n _value = __salt__[\"boto_secgroup.convert_to_group_ids\"](\n value,\n vpc_id=info.get(\"vpc_id\"),\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n )\n if not _value:\n r[\"error\"] = {\n \"message\": \"Security groups do not map to valid security group ids\"\n }\n return r\n _attachment_id = None\n if _attr == \"deleteOnTermination\":\n try:\n _attachment_id = info[\"attachment\"][\"id\"]\n except KeyError:\n r[\"error\"] = {\n \"message\": (\n \"No attachment id found for this ENI. The ENI must\"\n \" be attached before delete_on_termination can be\"\n \" modified\"\n )\n }\n return r\n try:\n r[\"result\"] = conn.modify_network_interface_attribute(\n network_interface_id, _attr, _value, attachment_id=_attachment_id\n )\n except boto.exception.EC2ResponseError as e:\n r[\"error\"] = __utils__[\"boto.get_error\"](e)\n return r",
"def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))",
"def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()",
"def ifaces(self, ifaces):\n \n self._ifaces = ifaces",
"def create_namespaces(self, iface):\n if iface not in self.namespaces:\n name = \"netns_{}\".format(iface)\n self._lhost.ui.create_namespace(name)\n\n self._lhost.ui.modify_ports([iface], netns=name)\n self.namespaces[iface] = name\n\n self.iface_config(iface, adminMode='Up')",
"def setup_bridge_network(self, iface):\n out = utils.run_script('conjure-up.lxc network show conjureup1')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup1 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none '\n 'ipv6.nat=false')\n if out.returncode != 0:\n raise Exception(\"Failed to create LXD conjureup1 network bridge: \"\n \"{}\".format(out.stderr.decode()))",
"def attach_network_interface(\n device_index,\n name=None,\n network_interface_id=None,\n instance_name=None,\n instance_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n if not salt.utils.data.exactly_one((name, network_interface_id)):\n raise SaltInvocationError(\n \"Exactly one (but not both) of 'name' or 'network_interface_id' \"\n \"must be provided.\"\n )\n\n if not salt.utils.data.exactly_one((instance_name, instance_id)):\n raise SaltInvocationError(\n \"Exactly one (but not both) of 'instance_name' or 'instance_id' \"\n \"must be provided.\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n r = {}\n result = _get_network_interface(conn, name, network_interface_id)\n if \"error\" in result:\n return result\n eni = result[\"result\"]\n try:\n info = _describe_network_interface(eni)\n network_interface_id = info[\"id\"]\n except KeyError:\n r[\"error\"] = {\"message\": \"ID not found for this network interface.\"}\n return r\n\n if instance_name:\n try:\n instance_id = get_id(\n name=instance_name, region=region, key=key, keyid=keyid, profile=profile\n )\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False\n\n try:\n r[\"result\"] = conn.attach_network_interface(\n network_interface_id, instance_id, device_index\n )\n except boto.exception.EC2ResponseError as e:\n r[\"error\"] = __utils__[\"boto.get_error\"](e)\n return r",
"def renameIface(self, station, nextWlan, iface):\n iface = iface[:-1]\n station.cmd('ip link set dev %s name %s-wlan%s' % (iface, station, nextWlan))\n station.cmd('ifconfig %s-wlan%s up' % (station, nextWlan))",
"def create_bridge_for_int_in_namespace(\n node, namespace, bridge_name, *interfaces):\n cmd = f\"ip netns exec {namespace} brctl addbr {bridge_name}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n\n for interface in interfaces:\n cmd = f\"ip netns exec {namespace} brctl addif {bridge_name} \" \\\n f\"{interface}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n\n cmd = f\"ip netns exec {namespace} ip link set dev {bridge_name} up\"\n exec_cmd_no_error(node, cmd, sudo=True)",
"def ibns_intf(task):\n # init lists of interfaces\n access_interfaces = []\n uplink_interfaces = []\n # iterate over all interfaces\n for intf in task.host[\"intfs\"]:\n\n # uplink interfaces\n if intf[\"interface\"] in task.host[\"uplinks\"]:\n uplink_interfaces.append(intf)\n\n # other non-excluded access ports\n elif intf[\"interface\"] not in task.host[\"excluded_intf\"]:\n if intf[\"access_vlan\"] in task.host[\"vlans\"]:\n access_interfaces.append(intf)\n\n # assign uplink interface list to task.host\n task.host[\"uplink_interfaces\"] = uplink_interfaces\n # render uplink interface configs\n uplink_intf_cfg = task.run(\n task=text.template_file,\n template=\"IBNS_uplink_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n # assign access interface list to task.host\n task.host[\"access_interfaces\"] = access_interfaces\n # render access interface configs\n access_intf_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS{task.host['ibns_ver']}_access_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n\n # init list of L3 vlan interfaces\n l3_vlan_int = [\"Vlan777\"]\n # list of vlan interfaces that will not relay\n no_relay_ints = [\"1\", \"666\", \"667\"]\n # iterate over active L3 interfaces\n for intf in task.host[\"ip_int_br\"]:\n # accept only those that are active vlan interfaces\n if intf[\"intf\"].startswith(\"Vlan\") == True and intf[\"status\"] == \"up\":\n # strip vlan id from interface name\n vlan_id = intf[\"intf\"].strip(\"Vlan\")\n # compare with list of no relay ints\n if vlan_id not in no_relay_ints:\n # add to list of interfaces for ISE DHPC relay\n l3_vlan_int.append(intf[\"intf\"])\n\n # save L3 vlan interfaces to task.host\n task.host[\"l3_vlan_int\"] = l3_vlan_int\n\n if \"emea\" in task.host['region']:\n L3VLAN_template = \"IBNS_EMEA_L3VLAN_intf.j2\"\n else:\n L3VLAN_template = \"IBNS_L3VLAN_intf.j2\"\n\n # render L3 vlan interface configs\n l3_vlan_int_cfg = task.run(\n task=text.template_file,\n template=L3VLAN_template,\n path=\"templates/\",\n **task.host,\n )\n\n # return configuration\n return uplink_intf_cfg.result + access_intf_cfg.result + l3_vlan_int_cfg.result",
"def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()",
"def connect(self):\n self.net.active(True)\n self.net.config(essid=self.ssid, password=self.pwd, channel=3)\n\n while not self.net.active():\n pass\n\n self.net.ifconfig((\"192.168.4.5\", \"255.255.255.0\", \"192.168.4.1\", \"208.67.222.222\"))",
"def get_network_interfaces(project_id, network_url, auto_assign_external_ip):\n network = network_url or get_network_url(project_id, 'default')\n network_interfaces = [{'network': network}]\n if auto_assign_external_ip:\n # This creates a single accessConfig instance and uses default values for\n # all fields to enable external network with auto-assigned IP.\n network_interfaces[0]['accessConfigs'] = [{'type': 'ONE_TO_ONE_NAT'}]\n return network_interfaces",
"def manage_vrf_interfaces(args):\n with IPDB() as ipdb:\n with ipdb.interfaces[args.vrf_name] as vrf:\n if args.action == \"add_interface\":\n vrf.add_port(ipdb.interfaces[args.interface].index)\n logger.info(f\"{args.interface} added to vrf {args.vrf_name}\")\n if args.action == \"remove_interface\":\n subprocess.run(f\"ip link set dev {args.interface} nomaster\", shell=True)\n logger.info(f\"{args.interface} removed from vrf {args.vrf_name}\")"
]
| [
"0.7055572",
"0.689255",
"0.61575776",
"0.6092796",
"0.5999253",
"0.5881435",
"0.58456975",
"0.58353883",
"0.58285594",
"0.582017",
"0.5815144",
"0.5740349",
"0.5718538",
"0.5695416",
"0.56693494",
"0.56601954",
"0.56459874",
"0.5645395",
"0.5645298",
"0.5542378",
"0.5535832",
"0.5526707",
"0.5522057",
"0.55146027",
"0.5484339",
"0.5476435",
"0.54751515",
"0.5468403",
"0.5462853",
"0.5443095"
]
| 0.71166056 | 0 |
Checks availability of DPDK for the given interface. DPDK availability of the interface depends on the presence of DPDK drivers and libraries for the particular NIC, and may vary across OpenStack releases. So, dpdk_drivers vary between releases and can be non-empty only for a node that is currently assigned to a cluster. Also, DPDK is currently supported only for Neutron with VLAN segmentation. | def dpdk_available(cls, instance, dpdk_drivers):
return (cls.get_dpdk_driver(instance, dpdk_drivers) is not None and
instance.node.cluster.network_config.segmentation_type ==
consts.NEUTRON_SEGMENT_TYPES.vlan) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_enable_dpdk(self):\n logging.info('Pre-flight check')\n self._dpdk_pre_post_flight_check()\n self._ovs_br_ex_port_is_system_interface()\n\n self.enable_hugepages_vfio_on_hvs_in_vms(4)\n with self.config_change(\n {\n 'enable-dpdk': False,\n 'dpdk-driver': '',\n },\n {\n 'enable-dpdk': True,\n 'dpdk-driver': 'vfio-pci',\n },\n application_name='ovn-chassis'):\n logging.info('Checking openvswitch-switch-dpdk is installed')\n self._openvswitch_switch_dpdk_installed()\n logging.info('Checking DPDK is configured in OVS')\n self._ovs_dpdk_init_configured()\n logging.info('Checking DPDK is successfully initialized in OVS')\n self._ovs_dpdk_initialized()\n logging.info('Checking that br-ex configed with DPDK interface...')\n self._ovs_br_ex_port_is_dpdk_interface()\n logging.info('and is not in error.')\n self._ovs_br_ex_interface_not_in_error()\n\n logging.info('Post-flight check')\n self._dpdk_pre_post_flight_check()\n\n self.disable_hugepages_vfio_on_hvs_in_vms()\n self._ovs_br_ex_port_is_system_interface()",
"def _ovs_br_ex_port_is_dpdk_interface(self):\n cmd = (\n 'dpdk-devbind.py --status-dev net '\n '| grep ^$(ovs-vsctl --bare --columns options '\n 'find interface external_ids:charm-ovn-chassis=br-ex '\n '|cut -f2 -d=)'\n '|grep \"drv=vfio-pci unused=$\"')\n for unit in zaza.model.get_units(self.application_name):\n zaza.utilities.juju.remote_run(\n unit.name, cmd, model_name=self.model_name, fatal=True)",
"def _ovs_dpdk_initialized(self):\n cmd = 'ovs-vsctl get open-vswitch . dpdk_initialized'\n for unit in zaza.model.get_units(self.application_name):\n result = zaza.utilities.juju.remote_run(\n unit.name,\n cmd,\n model_name=self.model_name,\n fatal=True).rstrip()\n assert result == 'true', (\n 'DPDK not initialized on {}'.format(unit.name))",
"def _ovs_dpdk_init_configured(self):\n cmd = 'ovs-vsctl get open-vswitch . other_config:dpdk-init'\n for unit in zaza.model.get_units(self.application_name):\n result = zaza.utilities.juju.remote_run(\n unit.name,\n cmd,\n model_name=self.model_name,\n fatal=True).rstrip()\n assert result == '\"true\"', (\n 'DPDK not configured on {}'.format(unit.name))",
"def verify_dhclient_on_interface(dut, search_string, interface, expected_count=2):\n st.log(\"Verifying dhclient for {} interface\".format(interface))\n ps_aux = basic_obj.get_ps_aux(dut, search_string)\n # if len(ps_aux) != expected_count:\n st.log(\"Observed {} DHCLIENT entries on {} interface\".format(len(ps_aux), interface))\n # return False\n dhclient_str = \"/run/dhclient.{}.pid\".format(interface)\n if not ps_aux:\n st.error(\"DHCLIENT process not found on DUT ...\")\n return False\n for entry in ps_aux:\n if dhclient_str in entry[\"command\"]:\n st.log(\"Required dhclient is found ...\")\n return True\n return False",
"def _dpdk_pre_post_flight_check(self):\n with self.assertRaises(\n zaza.model.CommandRunFailed,\n msg='openvswitch-switch-dpdk unexpectedly installed'):\n self._openvswitch_switch_dpdk_installed()\n with self.assertRaises(\n zaza.model.CommandRunFailed,\n msg='OVS unexpectedly configured for DPDK'):\n self._ovs_dpdk_init_configured()\n with self.assertRaises(\n AssertionError,\n msg='OVS unexpectedly has DPDK initialized'):\n self._ovs_dpdk_initialized()",
"def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0",
"def _CheckAttachDisk(self, params):\n uuid = params.get(\"uuid\", None)\n name = params.get(constants.IDISK_NAME, None)\n\n disk = self.GenericGetDiskInfo(uuid, name)\n instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n if (disk.dev_type != instance_template and\n instance_template != constants.DT_DISKLESS):\n raise errors.OpPrereqError(\"Instance has '%s' template while disk has\"\n \" '%s' template\" %\n (instance_template, disk.dev_type),\n errors.ECODE_INVAL)\n\n instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)\n # Make sure we do not attach disks to instances on wrong nodes. If the\n # instance is diskless, that instance is associated only to the primary\n # node, whereas the disk can be associated to two nodes in the case of DRBD,\n # hence, we have a subset check here.\n if disk.nodes and not set(instance_nodes).issubset(set(disk.nodes)):\n raise errors.OpPrereqError(\"Disk nodes are %s while the instance's nodes\"\n \" are %s\" %\n (disk.nodes, instance_nodes),\n errors.ECODE_INVAL)\n # Make sure a DRBD disk has the same primary node as the instance where it\n # will be attached to.\n disk_primary = disk.GetPrimaryNode(self.instance.primary_node)\n if self.instance.primary_node != disk_primary:\n raise errors.OpExecError(\"The disks' primary node is %s whereas the \"\n \"instance's primary node is %s.\"\n % (disk_primary, self.instance.primary_node))",
"def testCheckAvailable(self):\n img = self.img\n img.inspect()\n with converter.RootMounted(img.converter._h,\n '/dev/VolGroup00/LogVol00'):\n c = img.converter\n installer = redhat.LocalInstaller(\n c._h, '/dev/VolGroup00/LogVol00',\n db.DB(['{}/conf/guestconv.db'.format(env.topdir)]),\n log.get_logger_object(test_helper.logger)\n )\n\n kernel = redhat.Package('kernel',\n version='2.6.9', release='89.EL',\n arch='i686')\n self.assertTrue(installer.check_available([kernel]))",
"def async_device_available_fn(controller: UniFiController, obj_id: str) -> bool:\n device = controller.api.devices[obj_id]\n return controller.available and not device.disabled",
"def _has_ip_config(self, device_dict):\n keys_that_indicate_ip_config = [agent.IFCFG_IPADDR,\n agent.IFCFG_IPV6ADDR,\n agent.IFCFG_DHCP_HOSTNAME,\n agent.IFCFG_DHCPV6C,\n agent.IFCFG_DHCPV6C_OPTIONS,\n agent.IFCFG_DHCP_HOSTNAME,\n ]\n for key in keys_that_indicate_ip_config:\n if key in device_dict and device_dict[key]:\n return True\n return False",
"def _openvswitch_switch_dpdk_installed(self):\n cmd = 'dpkg-query -s openvswitch-switch-dpdk'\n for unit in zaza.model.get_units(self.application_name):\n zaza.utilities.juju.remote_run(\n unit.name, cmd, model_name=self.model_name, fatal=True)",
"def is_discover(pkt):\n dhcp_discover = 1\n try:\n dhcp_options = pkt['BOOTP']['DHCP options'].options\n message_type = filter(lambda x: x[0] == 'message-type',\n dhcp_options)\n message_type = message_type[0][1]\n return message_type == dhcp_discover\n except:\n return False",
"def initialize_dpdk_framework(node, if1, if2, nic_driver):\n if node[u\"type\"] == NodeType.DUT:\n pci_address1 = Topology.get_interface_pci_addr(node, if1)\n pci_address2 = Topology.get_interface_pci_addr(node, if2)\n\n command = f\"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}\"\\\n f\"/entry/init_dpdk.sh \" \\\n f\"{nic_driver} {pci_address1} {pci_address2}\"\n message = u\"Initialize the DPDK failed!\"\n exec_cmd_no_error(node, command, timeout=600, message=message)",
"def validate_driver_interfaces(self, context, node_id):\n return self.call(context,\n self.make_msg('validate_driver_interfaces',\n node_id=node_id))",
"def is_available():",
"def check_interface(self, interface):\n\n command = \"ifconfig %s > /dev/null\" % interface\n return subprocess.call(command, shell=True)",
"def _discover_interfaces(self, hostip):\n with LydianClient(hostip) as client:\n try:\n client.controller.discover_interfaces()\n self._add_endpoints(client, hostip)\n return True\n except Exception as _:\n return False",
"def verify_dhcpd_service_status(dut, process_id):\n st.log(\"Verifying DHCPD for {} \".format(process_id))\n dhcpd_pid = \"/run/dhcp-server/{}\".format(process_id)\n ps_aux = basic_obj.get_ps_aux(dut, dhcpd_pid, device=\"server\")\n st.log(ps_aux)\n config_string = \"\"\n if process_id == \"dhcpd6.pid\":\n config_string = \"-cf /etc/dhcp/dhcpd6.conf\"\n if process_id == \"dhcpd.pid\":\n config_string = \"-cf /etc/dhcp/dhcpd.conf\"\n st.log(\"Verifying the output with {}\".format(config_string))\n if config_string not in ps_aux:\n st.log(\"Required DHCPD service not found ...\")\n return False\n return True",
"def is_available(cls):\n\n try:\n proc = subprocess.Popen(\n ['systemctl', 'status', 'NetworkManager'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False",
"def checkip(\n group: DroneGroup,\n interface=\"lan\",\n check_ipv4: bool = True,\n check_ipv6: bool = False,\n specific_ipv4: str = None,\n specific_ipv6: str = None,\n):\n logger.debug(\n f\"{interface=} {check_ipv4=} {check_ipv6=} {specific_ipv4=} {specific_ipv6=}\"\n )\n\n responses = group.call(\n \"ubus\", {\"path\": f\"network.interface.{interface}\", \"method\": \"dump\"}\n )\n\n for drone, response in responses.items():\n if response[\"status\"] != \"ok\":\n continue\n\n if check_ipv4:\n ipv4_addresses = response[\"data\"].get(\"ipv4-address\", [])\n if not specific_ipv4:\n if len(ipv4_addresses) > 0:\n response[\"status\"] = \"failed\"\n else:\n found = False\n for ip in ipv4_addresses:\n if ip[\"address\"] == specific_ipv4:\n found = True\n if not found:\n response[\"status\"] = \"failed\"\n\n if check_ipv6:\n ipv6_addresses = response[\"data\"].get(\"ipv6-address\", [])\n if not specific_ipv6:\n if len(ipv6_addresses) > 0:\n response[\"status\"] = \"failed\"\n else:\n found = False\n for ip in ipv6_addresses:\n if ip[\"address\"] == specific_ipv6:\n found = True\n if not found:\n response[\"status\"] = \"failed\"\n\n return responses",
"def verify_interface(dut, **kwargs):\n if not kwargs.get(\"interface_name\"):\n st.log(\"Interface name not provided\")\n return False\n interface_name = kwargs.get(\"interface_name\")\n cli_type = st.get_ui_type(dut, **kwargs)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n output = show_interface(dut, interface_name=interface_name, cli_type=cli_type)\n if output:\n for data in output:\n if data[\"interface\"] == interface_name:\n st.log(\"Parsing data for interface {}\".format(interface_name))\n if \"sampling_rate\" in kwargs:\n if str(data[\"sampling_rate\"]) != str(kwargs[\"sampling_rate\"]):\n st.log(\"Sampling rate verification failed ..\")\n return False\n if \"admin_status\" in kwargs:\n if data[\"admin_status\"] != kwargs[\"admin_status\"]:\n st.log(\"Admin status verification failed ..\")\n return False\n st.log(\"Verification successful ...\")\n return True\n else:\n st.log(\"Show output not found ...\")\n return False",
"def verify_dbus_service(my_interface):\n try:\n interface = get_dbus_interface('org.freedesktop.DBus',\n '/org/freedesktop/DBus')\n return my_interface in interface.ListNames()\n except dbus.DBusException:\n return False",
"def _check_min_required(self):\n self._adb_available = False\n try:\n adb_version = utils.do_shell_command('adb --version')\n if adb_version:\n if adb_version and 'Android Debug Bridge' in adb_version:\n self._adb_available = True\n else:\n self._adb_available = False\n\n if self._adb_available:\n self._adb_available = False\n adb_devices = utils.do_shell_command('adb devices')\n\n try:\n if adb_devices:\n adb_devices = adb_devices.split(os.linesep)\n\n for i, adb_device in enumerate(adb_devices):\n if not adb_device: # skip empty lines at bottom\n continue\n if i == 0: # skip first line 'List of devices attached'\n continue\n if adb_device.startswith('*'): # skip these lines '* daemon started successfully *'\n continue\n\n self._adb_available = True\n\n if not self._adb_available:\n print('No Devices! Make sure \\'Usb-Debugging\\' is enabled in DeveloperSettings')\n\n except Exception as e:\n print(e)\n\n # io error is handled here not in do_shell_command\n # if adb isnt there it gives file not found\n except IOError as io_error:\n # file not found\n if io_error.errno == 2:\n self._adb_available = False",
"def _check_kvm():\n if not os.path.exists(os.path.join(os.sep, 'dev', 'kvm')):\n raise CommandError('KVM interface not found - check that /dev/kvm '\n 'exists. Alternatively, you can disable KVM (-n '\n 'option) or download pre-built images (-d option)')",
"def is_configure_with_dhcp(self):\n\t\treturn bool(call_sdk_function('PrlVmDevNet_IsConfigureWithDhcp', self.handle))",
"def nat_interface_name_is_valid(interface_name):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n if interface_name.startswith(\"Ethernet\"):\n interface_dict = config_db.get_table('PORT')\n elif interface_name.startswith(\"PortChannel\"):\n interface_dict = config_db.get_table('PORTCHANNEL')\n elif interface_name.startswith(\"Vlan\"):\n interface_dict = config_db.get_table('VLAN')\n elif interface_name.startswith(\"Loopback\"):\n return True\n else:\n return False\n\n if interface_name is not None:\n if not interface_dict:\n return False\n return interface_name in interface_dict\n\n return False",
"def check_vpn_interface():\n return validate_vpn_interface(call_command('netstat -i')[0].split('\\n'))",
"def _is_valid_interface(device, switch, nos_driver):\n for key in device.keys():\n for (speed, interface) in device[key]:\n if not _is_valid_three_tupple(interface):\n return False\n if not _is_valid_interface_speed(speed):\n return False\n return True",
"def _validate_interface_exists(params, error_callback):\n local_interface = params['local_interface']\n net_override = params['net_config_override']\n if not net_override and local_interface not in netifaces.interfaces():\n message = ('Invalid local_interface specified. %s is not available.' %\n local_interface)\n error_callback(message)"
]
| [
"0.6499057",
"0.6261596",
"0.58940613",
"0.588549",
"0.5753477",
"0.56981313",
"0.5615253",
"0.5498032",
"0.54936856",
"0.53996813",
"0.5281756",
"0.5256299",
"0.5163212",
"0.5129217",
"0.5039332",
"0.497954",
"0.49176756",
"0.48825467",
"0.48662212",
"0.48462084",
"0.48421496",
"0.48408353",
"0.48380327",
"0.48262867",
"0.48173383",
"0.48115498",
"0.4783826",
"0.4780819",
"0.47727367",
"0.47369644"
]
| 0.653007 | 0 |
Update information about offloading modes for the interface. | def update_offloading_modes(cls, instance, new_modes, keep_states=False):
def set_old_states(modes):
"""Set old state for offloading modes
:param modes: List of offloading modes
"""
for mode in modes:
if mode['name'] in old_modes_states:
mode['state'] = old_modes_states[mode['name']]
if mode.get('sub'):
set_old_states(mode['sub'])
if keep_states:
old_modes_states = instance.offloading_modes_as_flat_dict(
instance.offloading_modes)
set_old_states(new_modes)
instance.offloading_modes = new_modes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self) -> None:\n active = None\n if self.type == \"on_off\":\n self._data = self._tm_client.api.data\n if self._data:\n active = self._data.active_torrent_count > 0\n\n elif self.type == \"turtle_mode\":\n active = self._tm_client.api.get_alt_speed_enabled()\n\n if active is None:\n return\n\n self._state = STATE_ON if active else STATE_OFF",
"def __toggle_mode(self):\n # Update mode\n # Update mode, default canvas controls\n self.__ui_mode = {\n UImode.CANVASCONTROL: UImode.TEACHPANEL,\n UImode.TEACHPANEL: UImode.CANVASCONTROL\n }.get(self.__ui_mode, UImode.CANVASCONTROL)\n\n # Update UI\n # get list of robots\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n self.__reload_caption(new_list)",
"async def _load_modes(self) -> None:\n modes: List[Dict[str, Any]] = await self._api_request(\"modes\")\n _LOGGER.debug(\"Loaded modes\")\n self._modes = [Mode(m) for m in modes]",
"def update(self):\n self._is_on = self._is_on",
"def _io_update(self):\n logger.info(\"IO Update\")\n self._toggle_pin(\"IO_UPDATE\")",
"def ChangeMode(self, mode):\n if mode in MODE_DICT:\n self.ImportCover(MODE_DICT[mode], layer = MODE_LAYER)",
"def platform_mode_manifest_updates(self, dbapi, mode):\n pass",
"def update(self,update_flags):\n pass",
"def updateOptions(self):\r\n if self.varSegment.get() == \"binary\":\r\n self.checkSaveBinary.config(state=tk.DISABLED)\r\n else:\r\n self.checkSaveBinary.config(state=tk.NORMAL)",
"def set_current_operation_mode(self, operation_mode):\n self._current_operation_mode = operation_mode\n \"\"\"Retrieve from textual representation\"\"\"\n if self._current_operation_mode == 'Off':\n self._api._opmode = 0;\n elif self._current_operation_mode == 'Heat only':\n self._api._opmode = 1;\n elif self._current_operation_mode == 'Cool only':\n self._api._opmode = 2;\n elif self._current_operation_mode == 'Heat & Cool':\n self._api._opmode = 3; \n self._api.set()\n self.schedule_update_ha_state()",
"def reloadMode(self): \n\t\tpass",
"async def async_update_all(self):\n await self.async_update_transport_state()\n if self._state != STATE_OFF:\n await self.async_update_volume_level()\n await self.async_update_mute()\n await self.async_update_track_info()\n await self.async_update_play_mode()",
"def notify_mode_change(self, mode):\n pass",
"def change_mode(self, mode):\r\n self.update_enrollment(mode=mode)",
"def get_modes_of_operation(self):\n return [\"Online\", \"Offline\"]",
"def update(self):\n self.downloader.authorize()\n self.update_users()\n self.update_channels()\n self.update_history()",
"def conf_update(self):\n pass",
"def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)",
"def update(self, **options):\n pass",
"def UpdateData(self, event = None):\n ##NOTE: Will have to change way user's variables are saved if \n ##modes allow users to change these in the future.\n ##Probably by grabbing the oldMode and checking its settings.\n array = {\"JconfSelection\": [self.chJconf,\n self.chJconf.GetStringSelection()],\n \"NameServer\": [self.cbNameServer,\n self.cbNameServer.GetValue()],\n \"Xplorer\": [self.cbXplorer, self.cbXplorer.GetValue()],\n \"Conductor\": [self.cbConductor, self.cbConductor.GetValue()],\n \"DesktopMode\": [self.cbDesktop, self.cbDesktop.GetValue()],\n \"XplorerType\": [self.rbXplorer,\n XPLORER_TYPE_LIST[self.rbXplorer.GetSelection()]]}\n\n for var in array:\n ##if array[var][0].IsEnabled():\n self.state.Edit(var, array[var][1])\n self.React()\n return",
"def save_changes(self):\n\n velib, autolib, subway = None, None, None\n for key, value in VELIB_SUBSCRIPTIONS.iteritems():\n if self._velib.get() == value:\n velib = key\n break\n for key, value in AUTOLIB_SUBSCRIPTIONS.iteritems():\n if self._autolib.get() == value:\n autolib = key\n break\n for key, value in SUBWAY_SUBSCRIPTIONS.iteritems():\n if self._subway.get() == value:\n subway = key\n break\n preferences = {\n FASTEST: self._fastest.get(),\n SHORTEST: self._shortest.get(),\n CHEAPEST: self._cheapest.get(),\n SIMPLEST: self._simplest.get(),\n WEATHER_IMPACT: self._weather_impact.get(),\n LESS_PAINFUL: self._less_painful.get(),\n LESS_WALKING: self._less_walking.get()\n }\n\n result = self._system.set_profile_settings(velib, autolib, subway, self._driving_licence.get(), preferences)\n if not result[\"success\"]:\n showerror('Erreur système', result[\"error\"])\n return\n\n # Redirection vers la page principale\n from settings import RideSettingsPage\n self.pack_forget()\n RideSettingsPage(self._window, self._system)",
"def _io_update(self,):\n\n self._toggle_pin(IOUPDATE_PIN)",
"def update():\n\n # load the OPML file and update any feeds\n for o in oercloud.Session().query(oercloud.Feed).filter_by(\n feed_type=oercloud.feed.OPML):\n \n aggregator.LOG.info(\"Loading OPML from %s\" % o.url)\n update_feed_list(opml.parse(o.url))\n\n # check each feed and see if it should be polled\n check_feeds()",
"def reloadInformations(self):\n\n json = requests.get(\"{}/service/{}\".format(os.getenv(\n \"USE_CASE_SERVICE_PORT_SERVICE\", \"{}/port-service\".format(self.portaddress)), self.port)).json()\n\n logger.debug(\"reload metadata informations: got {}\".format(json))\n\n svc = Util.getServiceObject(json[\"informations\"])\n\n self.useZipForFolder = svc.fileTransferArchive == FileTransferArchive.zip\n self.fileTransferMode = svc.fileTransferMode\n self.fileTransferArchive = svc.fileTransferArchive\n\n if isinstance(svc, OAuth2Service):\n self.loginMode = 0\n self.credentials = svc.to_dict().get(\"credentials\", {})\n else:\n self.loginMode = 1\n\n logger.debug(\"got svc: {}, loginmode: {}\".format(\n svc.to_dict(), self.loginMode))",
"def updateActionsAndMenus(self):\n self.app.actions.getAction(\"save_CAlpha\").setEnabled(self.loaded)\n self.app.actions.getAction(\"unload_CAlpha\").setEnabled(self.loaded)",
"def set_mode(self, mode):\n\t\tif mode not in (self.MODE_PREVIEW, self.MODE_ANALYZE, self.MODE_SEND):\n\t\t\traise ValueError('mode must be one of the MODE_* constants')\n\t\tself._mode = mode\n\t\tif mode == self.MODE_ANALYZE:\n\t\t\tself.attachment_images = {}",
"def modes(self, modes):\n\n self._modes = modes",
"def modes(self, modes):\n\n self._modes = modes",
"def _on_mode_change(self, event_name: str, data: dict, kwargs: dict) -> None:\n mode = data[\"name\"]\n\n if data[\"state\"] == \"on\":\n self.mode_events.append(mode)\n elif mode in self.mode_events:\n self.mode_events.remove(mode)\n\n try:\n primary = max(\n (m for m in self.mode_alterations if m[\"mode\"] in self.mode_events),\n key=lambda m: m[\"priority\"],\n )\n except ValueError:\n try:\n primary = next((m for m in self.mode_alterations if m[\"mode\"] == mode))\n except StopIteration:\n return\n\n if primary[\"action\"] == \"enable\":\n primary[\"action\"] = \"disable\"\n else:\n primary[\"action\"] = \"enable\"\n\n # If the primary mode alteration prescribes an action that matches the state the\n # app is already in, return:\n if (self.enabled and primary[\"action\"] == \"enable\") or (\n not self.enabled and primary[\"action\"] == \"disable\"\n ):\n return\n\n if primary[\"action\"] == \"enable\":\n self.enable()\n else:\n self.disable()",
"def Update(self, mode = UPDATE_MODE.all):\r\n aux_versions = dstore.Get(\"versions\")\r\n \r\n if(aux_versions['hw'] != None): \r\n Ui().lineHwVersion.setText(str(aux_versions['hw'])) \r\n else:\r\n Ui().lineHwVersion.setText(\"- -\")\r\n \r\n if(aux_versions['fw'] != None): \r\n Ui().lineFwVersion.setText(str(aux_versions['fw'])) \r\n else:\r\n Ui().lineFwVersion.setText(\"- -\") \r\n \r\n \r\n \r\n \"\"\" TERMINAL INFO \"\"\"\r\n aux_terminal_info = dstore.Get(\"terminal_info\", \"GET\")\r\n \r\n \"\"\" number of cells \"\"\"\r\n if(aux_terminal_info['number_of_cells'] != None):\r\n Ui().lineCells.setText(str(aux_terminal_info['number_of_cells'])) \r\n else:\r\n Ui().lineCells.setText(\"-\") \r\n \r\n \r\n \"\"\" battery \"\"\"\r\n if(aux_terminal_info['battery'] != None):\r\n Ui().lineBattery.setText(str(aux_terminal_info['battery'])+\" %\") \r\n else:\r\n Ui().lineBattery.setText(\"-- %\") \r\n \r\n \"\"\" speaker \"\"\" \r\n if(aux_terminal_info['speaker']['keys'] == True):\r\n Ui().lineSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['keys'] == False):\r\n Ui().lineSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerKeys.setText(\"- -\")\r\n Ui().pushSpeakerKeys.setText(\"- -\")\r\n \r\n if(aux_terminal_info['speaker']['system'] == True):\r\n Ui().lineSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['system'] == False):\r\n Ui().lineSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['timing'] == True):\r\n Ui().lineSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['timing'] == False):\r\n Ui().lineSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else: \r\n Ui().lineSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['keys'] == None or aux_terminal_info['speaker']['timing']==None or aux_terminal_info['speaker']['system']==None): \r\n Ui().pushSpeakerKeys.setEnabled(False)\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n else:\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n \r\n \r\n return True"
]
| [
"0.53883725",
"0.5323775",
"0.52740496",
"0.5260813",
"0.52405226",
"0.5239493",
"0.52206844",
"0.51972073",
"0.5171975",
"0.516889",
"0.51636213",
"0.5140995",
"0.51388824",
"0.51300997",
"0.5129447",
"0.51022875",
"0.5071882",
"0.5071376",
"0.5067457",
"0.50577545",
"0.5016692",
"0.501581",
"0.5005964",
"0.50022066",
"0.49943954",
"0.49920917",
"0.49919",
"0.49919",
"0.49914688",
"0.49748337"
]
| 0.68992096 | 0 |
Query networks to interfaces mapping on all nodes in cluster. Returns combined results for NICs and bonds for every node. Names are returned for node and interface (NIC or bond), IDs are returned for networks. Results are sorted by node name then interface name. | def get_networks_to_interfaces_mapping_on_all_nodes(cls, cluster):
nodes_nics_networks = db().query(
models.Node.hostname,
models.NodeNICInterface.name,
models.NetworkGroup.id,
).join(
models.Node.nic_interfaces,
models.NodeNICInterface.assigned_networks_list
).filter(
models.Node.cluster_id == cluster.id,
)
nodes_bonds_networks = db().query(
models.Node.hostname,
models.NodeBondInterface.name,
models.NetworkGroup.id,
).join(
models.Node.bond_interfaces,
models.NodeBondInterface.assigned_networks_list
).filter(
models.Node.cluster_id == cluster.id,
)
return nodes_nics_networks.union(
nodes_bonds_networks
).order_by(
# column 1 then 2 from the result. cannot call them by name as
# names for column 2 are different in this union
'1', '2'
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enumerate_network(arg):\n\n network = ip_network(arg, strict=False)\n data = list(map(str, network.hosts()))\n data.insert(0, str(network.network_address))\n if network.prefixlen != network.max_prefixlen:\n data.append(str(network.broadcast_address))\n return data",
"def _compile_networks(self):\n\n _header_ = self._header_ + '_compile_networks(): '\n\n if self.verbose:\n print(_header_ + 'Compiling all networks ...')\n\n networks = []\n\n all_nidx = set(self.nidx2lidx.keys())\n\n while all_nidx:\n\n nidx0 = [all_nidx.pop()]\n network = set(nidx0)\n\n while nidx0 and all_nidx:\n\n nidx = set()\n\n for l in nidx0:\n lidx = self.nidx2lidx[l]\n for n in lidx:\n nidx |= self.lidx2nidx[n]\n\n nidx -= network\n network |= nidx\n all_nidx -= nidx\n nidx0 = nidx.copy()\n\n networks.append(network)\n\n if self.verbose:\n print(_header_ + 'Found %d networks' % len(networks))\n for i, network in enumerate(networks):\n print(' Network %d - %s' % (i, ','.join([str(j) for j in network])))\n\n return networks",
"def getnodes(self):\n # assumes self._objslock already held\n r = set()\n for e in self._objs.values():\n for netif in e.netifs():\n r.add(netif.node)\n return r",
"def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids",
"def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def list():\n\n\treturn netifaces.interfaces()",
"def test_list_cluster_network(self):\n pass",
"def getAllHosts(cluster):\n nics = []\n hosts = rhevGet(\"/api/hosts\")\n doc = libxml2.parseDoc(hosts)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/hosts/host[cluster[@id='\" + getClusterData(cluster ,\"id\") + \"']]\")\n for i in res:\n #hrefs.append(i.prop(\"href\"))\n nic = rhevGet(i.prop(\"href\")+\"/nics\")\n nicdoc = libxml2.parseDoc(nic)\n ctxt = nicdoc.xpathNewContext()\n res = ctxt.xpathEval(\"/host_nics/host_nic/name[text() = '%s']/parent::*\" %rhev_settings.NIC)\n for i in res:\n nics.append(i.prop(\"href\"))\n return nics",
"def get_interfaces(self):\n interfaces = _parse_interfaces(self.do('addr', 'show'),\n filters=PHYSICAL_INTERFACES)\n\n interfaces.sort(key=lambda x: x.ifname)\n for i in interfaces:\n if i.ifname not in self.host_mapping:\n generic_name = 'ge%d' % self.next_generic_index\n self.host_mapping[i.ifname] = generic_name\n self.next_generic_index += 1\n\n # change ifname to generic version\n i.ifname = self.host_mapping[i.ifname]\n self.generic_mapping = dict((v, k) for k, v in\n self.host_mapping.iteritems())\n\n return interfaces",
"def __get_network_interface_info(self):\n iface_list = []\n for i in netifaces.interfaces():\n addr = netifaces.ifaddresses(i)\n\n\n # clumsy way to filter which interfaces get added to list. If these elements raise KeyErrors, we skip\n try:\n iface_list.append( {i : { \n 'ip_address' : addr[netifaces.AF_INET][0]['addr'],\n 'mac' : addr[netifaces.AF_LINK][0]['addr']\n }})\n except KeyError,e:\n\t pass\n self.print_debug(\"Key not found - _get_network_interface_info - {0}\".format(addr))\n\n return iface_list",
"def print_interfaces(interfaces):\n\n for i in interfaces:\n print \"\"\"<tr>\"\"\"\n print \"\"\"<td valign=top style=\"border-bottom: 1px dotted #C0C0C0\">%s</td>\"\"\" % i\n print \"\"\"<td style=\"border-bottom: 1px dotted #C0C0C0\">\"\"\"\n\n sql_netname = \"\"\"SELECT DISTINCT net_name FROM ipall_ip WHERE interface_name LIKE '%s' \"\"\" % str(i)\n netname = conn.get_data(sql_netname)\n if netname == ():\n print \"\"\" \"\"\"\n continue\n else:\n# for n in netname:\n print \"\"\"%s<br>\"\"\" % (netname[0][0])\n# print \"\"\"%s - %s<br>\"\"\" % (n[0], n[1])\n print \"\"\"</td>\"\"\"\n print \"\"\"</tr>\"\"\"",
"def getnetnodes(self):\n # GetNetNodes2_bn is not listed in the API manual, but GetNetNodes_bn\n # is. Looks like an update to the API that is undocumented.\n\n # (const net_bn* net, const char options[])\n zerochar_type = c_char * 0\n cnetica.GetNetNodes2_bn.argtypes = [c_void_p, zerochar_type]\n cnetica.GetNetNodes2_bn.restype = c_void_p\n return cnetica.GetNetNodes2_bn(self.net, zerochar_type()) # nl_p",
"def get_all_netids(self):\n self.setQuery(\"\"\"\n Select ?netid where {\n ?who <http://vivo.dartmouth.edu/ontology/netId> ?netid .\n }\"\"\")\n\n try:\n rval = self.query()\n g = rval.convert()\n return [x['netid']['value'] for x in g['results']['bindings']]\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)",
"def nodes(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].nodes.values()])",
"def get_all_interfaces():\n global all_interfaces\n if all_interfaces:\n return all_interfaces\n\n f = open('/proc/net/dev','r')\n ifacelist = f.read().split('\\n')\n f.close()\n\n # remove 2 lines header\n ifacelist.pop(0)\n ifacelist.pop(0)\n\n all_interfaces = {}\n # loop to check each line\n for line in ifacelist:\n\n ifacedata = line.replace(' ','').split(':')\n\n # check the data have 2 elements\n if len(ifacedata) == 2:\n all_interfaces[ifacedata[0]] = get_interface_ip(ifacedata[0])\n\n return all_interfaces",
"def list_networks():\n return __sets.keys()",
"def get_all(self, addresses=None, node_uuid=None):\n if not api_utils.allow_ramdisk_endpoints():\n raise exception.NotFound()\n\n api_utils.check_policy('baremetal:driver:ipa_lookup')\n\n # Validate the list of MAC addresses\n if addresses is None:\n addresses = []\n\n valid_addresses = []\n invalid_addresses = []\n for addr in addresses:\n try:\n mac = utils.validate_and_normalize_mac(addr)\n valid_addresses.append(mac)\n except exception.InvalidMAC:\n invalid_addresses.append(addr)\n\n if invalid_addresses:\n node_log = ('' if not node_uuid\n else '(Node UUID: %s)' % node_uuid)\n LOG.warning('The following MAC addresses \"%(addrs)s\" are '\n 'invalid and will be ignored by the lookup '\n 'request %(node)s',\n {'addrs': ', '.join(invalid_addresses),\n 'node': node_log})\n\n if not valid_addresses and not node_uuid:\n raise exception.IncompleteLookup()\n\n try:\n if node_uuid:\n node = objects.Node.get_by_uuid(\n api.request.context, node_uuid)\n else:\n node = objects.Node.get_by_port_addresses(\n api.request.context, valid_addresses)\n except exception.NotFound:\n # NOTE(dtantsur): we are reraising the same exception to make sure\n # we don't disclose the difference between nodes that are not found\n # at all and nodes in a wrong state by different error messages.\n raise exception.NotFound()\n\n if CONF.api.restrict_lookup and not self.lookup_allowed(node):\n raise exception.NotFound()\n\n if api_utils.allow_agent_token():\n try:\n topic = api.request.rpcapi.get_topic_for(node)\n except exception.NoValidHost as e:\n e.code = http_client.BAD_REQUEST\n raise\n\n found_node = api.request.rpcapi.get_node_with_token(\n api.request.context, node.uuid, topic=topic)\n else:\n found_node = node\n return convert_with_links(found_node)",
"def find_connections(conn, input_wires, output_wires, tile_type_pkey):\n\n # Create connected node sets\n node_sets = list(expand_nodes_in_tile_type(conn, tile_type_pkey))\n\n # Number of site external connections for ports\n is_top_level_pin_external = {}\n for wire in input_wires:\n is_top_level_pin_external[wire] = False\n\n for wire in output_wires:\n is_top_level_pin_external[wire] = False\n\n internal_connections = {}\n top_level_connections = {}\n\n wire_to_site_pin = {}\n\n cur = conn.cursor()\n for node_set in node_sets:\n ipin_count = 0\n opin_count = 0\n node_set_has_external = False\n\n pins_used_in_node_sets = set()\n input_pins = set()\n output_pins = set()\n\n for node_pkey in node_set:\n cur.execute(\n \"\"\"\nSELECT\n tile.tile_type_pkey,\n wire_in_tile.name,\n site_type.name,\n site_pin.name,\n site_pin.direction,\n count()\nFROM wire\nINNER JOIN wire_in_tile\nON wire.wire_in_tile_pkey = wire_in_tile.pkey\nINNER JOIN site_pin\nON site_pin.pkey = wire_in_tile.site_pin_pkey\nINNER JOIN tile\nON wire.tile_pkey = tile.pkey\nINNER JOIN site_type\nON site_type.pkey = site_pin.site_type_pkey\nWHERE\n wire.node_pkey = ?\nAND\n wire_in_tile.site_pin_pkey IS NOT NULL\nGROUP BY site_pin.direction;\n \"\"\", (node_pkey, )\n )\n for (wire_tile_type_pkey, wire_name, site_type_name, site_pin_name,\n direction, count) in cur:\n if wire_tile_type_pkey == tile_type_pkey:\n value = (site_type_name, site_pin_name)\n if wire_name in wire_to_site_pin:\n assert value == wire_to_site_pin[wire_name], (\n wire_name, value, wire_to_site_pin[wire_name]\n )\n else:\n wire_to_site_pin[wire_name] = value\n\n pins_used_in_node_sets.add(wire_name)\n direction = prjxray.site_type.SitePinDirection(direction)\n\n if direction == prjxray.site_type.SitePinDirection.IN:\n output_pins.add(wire_name)\n opin_count += count\n elif direction == prjxray.site_type.SitePinDirection.OUT:\n input_pins.add(wire_name)\n ipin_count += count\n else:\n assert False, (node_pkey, direction)\n else:\n node_set_has_external = True\n\n assert len(input_pins) in [0, 1], input_pins\n\n if ipin_count == 0 or opin_count == 0 or node_set_has_external:\n # This node set is connected externally, mark as such\n for wire_name in pins_used_in_node_sets:\n is_top_level_pin_external[wire_name] = True\n\n if ipin_count > 0 and opin_count > 0:\n # TODO: Add check that pips and site pins on these internal\n # connections are 0 delay.\n assert len(input_pins) == 1\n input_wire = input_pins.pop()\n\n for wire_name in output_pins:\n if wire_name in internal_connections:\n assert input_wire == internal_connections[wire_name]\n else:\n internal_connections[wire_name] = input_wire\n\n for wire in sorted(is_top_level_pin_external):\n if not is_top_level_pin_external[wire]:\n if wire in input_wires:\n input_wires.remove(wire)\n elif wire in output_wires:\n output_wires.remove(wire)\n else:\n assert False, wire\n else:\n top_level_connections[wire] = wire_to_site_pin[wire]\n\n output_internal_connections = {}\n for output_wire in internal_connections:\n output_internal_connections[wire_to_site_pin[output_wire]] = \\\n wire_to_site_pin[internal_connections[output_wire]]\n\n return top_level_connections, output_internal_connections",
"def get_interface_by_net_name(cls, node_id, netname):\n iface = db().query(models.NodeNICInterface).join(\n (models.NetworkGroup,\n models.NodeNICInterface.assigned_networks_list)\n ).filter(\n models.NetworkGroup.name == netname\n ).filter(\n models.NodeNICInterface.node_id == node_id\n ).first()\n if iface:\n return iface\n\n return db().query(models.NodeBondInterface).join(\n (models.NetworkGroup,\n models.NodeBondInterface.assigned_networks_list)\n ).filter(\n models.NetworkGroup.name == netname\n ).filter(\n models.NodeBondInterface.node_id == node_id\n ).first()",
"async def get_all_units():\n all_devices = []\n if network is None:\n import netifaces\n\n gateway = netifaces.gateways().get(\"default\", {})\n subnet = gateway.get(netifaces.AF_INET, ())[0][:-1] + \"0/24\"\n else:\n subnet = network\n async with gdh_session() as session:\n googledevices = NetworkScan(loop, session)\n result = await googledevices.scan_for_units(subnet)\n if feature:\n for unit in result:\n if unit[feature]:\n all_devices.append(unit)\n else:\n all_devices = result\n print(format_json(all_devices))",
"async def get_all_units():\n all_devices = []\n if network is None:\n import netifaces\n\n gateway = netifaces.gateways().get(\"default\", {})\n subnet = gateway.get(netifaces.AF_INET, ())[0][:-1] + \"0/24\"\n else:\n subnet = network\n async with gdh_session() as session:\n googledevices = NetworkScan(loop, session)\n result = await googledevices.scan_for_units(subnet)\n if feature:\n for unit in result:\n if unit[feature]:\n all_devices.append(unit)\n else:\n all_devices = result\n print(format_json(all_devices))",
"def do_network_list(cs, args):\n opts = {}\n opts['container'] = args.container\n opts = zun_utils.remove_null_parms(**opts)\n networks = cs.containers.network_list(**opts)\n zun_utils.list_container_networks(networks)",
"def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:\n return pulumi.get(self, \"network_interfaces\")",
"def scan_wifi_modules(network: str = \"wlan0\") -> Dict[str, str]:\n\n # Optional requieres are imported only inside the function\n import netifaces\n import nmap\n\n ip_list = {}\n local_net = netifaces.ifaddresses(network)[netifaces.AF_INET][0][\"addr\"]\n\n nm = nmap.PortScanner()\n nm.scan(hosts=f\"{local_net}/24\", arguments=\"-sn\")\n hosts = nm.all_hosts()\n\n for host in hosts:\n try:\n response = requests.get(f\"http://{host}/board\", timeout=0.1)\n if response.ok:\n ip_list[host] = response.json()\n except:\n continue\n\n return ip_list",
"def nmap_scan():\n # Create the search and config objects\n hs = HostSearch()\n config = Config()\n\n # Static options to be able to figure out what options to use depending on the input the user gives.\n nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']\n options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}\n\n # Create an argument parser\n hs_parser = hs.argparser\n argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \\\n description=\"Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap\")\n argparser.add_argument('type', metavar='type', \\\n help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \\\n type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')\n arguments, extra_nmap_args = argparser.parse_known_args()\n\n # Fix the tags for the search\n tags = nmap_types[nmap_types.index(arguments.type):]\n tags = [\"!nmap_\" + tag for tag in tags]\n\n hosts = hs.get_hosts(tags=tags)\n hosts = [host for host in hosts]\n\n # Create the nmap arguments\n nmap_args = []\n nmap_args.extend(extra_nmap_args)\n nmap_args.extend(options[arguments.type].split(' '))\n\n # Run nmap\n print_notification(\"Running nmap with args: {} on {} hosts(s)\".format(nmap_args, len(hosts)))\n if len(hosts):\n result = nmap(nmap_args, [str(h.address) for h in hosts])\n # Import the nmap result\n for host in hosts:\n host.add_tag(\"nmap_{}\".format(arguments.type))\n host.save()\n print_notification(\"Nmap done, importing results\")\n stats = import_nmap(result, \"nmap_{}\".format(arguments.type), check_function=all_hosts, import_services=True)\n stats['scanned_hosts'] = len(hosts)\n stats['type'] = arguments.type\n\n Logger().log('nmap_scan', \"Performed nmap {} scan on {} hosts\".format(arguments.type, len(hosts)), stats)\n else:\n print_notification(\"No hosts found\")",
"def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"",
"def collectNet(self):\n network = self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)",
"def getNets(self):\n\t\treturn NetLoader.listNetworks()",
"def _get_ipv6_addresses(self, host: str) -> Dict[str, List[IPv6Address]]:\n if host == \"self\":\n command = \"show ipv6 interface\"\n elif host == \"peer\":\n command = \"failover exec mate show ipv6 interface\"\n\n show_ipv6_interface = self.show(command)\n show_ipv6_interface_lines: List[str] = show_ipv6_interface.strip().splitlines()\n first_line = show_ipv6_interface_lines.pop(0)\n interface: str = first_line.split()[0]\n ipv6_addresses: List[IPv6Interface] = []\n results: Dict[str, List] = {}\n for line in show_ipv6_interface_lines:\n # match IPv6 addresses under interface line\n if line[0].isspace():\n match = RE_IPV6_INTERFACE_MATCH.match(line)\n if match:\n ipv6_addresses.append(IPv6Interface(f\"{match.group(1)}{match.group(2)}\"))\n # update results mapping interface to matched IPv6 addresses and generate the next interface name\n else:\n if ipv6_addresses:\n results[interface] = ipv6_addresses\n ipv6_addresses = []\n interface = line.split()[0]\n\n # Add final interface in iteration if it has IPv6 addresses\n if ipv6_addresses:\n results[interface] = ipv6_addresses\n\n log.debug(\"Host %s: ip interfaces %s\", self.host, results)\n return results"
]
| [
"0.6063",
"0.5890889",
"0.57809484",
"0.5535784",
"0.5527903",
"0.5459575",
"0.5419172",
"0.5402386",
"0.5387223",
"0.5363235",
"0.53565717",
"0.5336501",
"0.53361285",
"0.5334019",
"0.52824455",
"0.5259729",
"0.52580357",
"0.5255129",
"0.52336955",
"0.520638",
"0.52045494",
"0.52045494",
"0.5190373",
"0.5187256",
"0.5177102",
"0.51766163",
"0.5155706",
"0.51459146",
"0.5139827",
"0.5136607"
]
| 0.78624934 | 0 |
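Aside: the get_all_units snippets above derive the scan range by chopping the last octet off the gateway address and appending "0/24". A sturdier variant is sketched below; it is illustrative only, assumes the third-party netifaces package and a default IPv4 gateway, and the helper name default_scan_subnet is made up for this sketch. It honours the interface's real netmask via the standard ipaddress module instead of hard-coding a /24.

import ipaddress
import netifaces

def default_scan_subnet() -> str:
    # netifaces.gateways()["default"][AF_INET] is ('192.168.1.1', 'eth0');
    # take the interface name behind the default route.
    iface = netifaces.gateways()["default"][netifaces.AF_INET][1]
    info = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]
    # Build the network from the interface's own address and netmask.
    return str(ipaddress.ip_network(f"{info['addr']}/{info['netmask']}", strict=False))

if __name__ == "__main__":
    print(default_scan_subnet())  # e.g. '192.168.1.0/24'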
Get interface with specified network assigned to it. This method first checks for a NodeNICInterface with the specified network assigned. If that fails it will look for a NodeBondInterface with that network assigned. | def get_interface_by_net_name(cls, node_id, netname):
iface = db().query(models.NodeNICInterface).join(
(models.NetworkGroup,
models.NodeNICInterface.assigned_networks_list)
).filter(
models.NetworkGroup.name == netname
).filter(
models.NodeNICInterface.node_id == node_id
).first()
if iface:
return iface
return db().query(models.NodeBondInterface).join(
(models.NetworkGroup,
models.NodeBondInterface.assigned_networks_list)
).filter(
models.NetworkGroup.name == netname
).filter(
models.NodeBondInterface.node_id == node_id
).first() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_interface(\n network: Union[ipaddress.IPv6Interface, ipaddress.IPv4Interface, str], index: int\n) -> Union[ipaddress.IPv6Interface, ipaddress.IPv4Interface]:\n if isinstance(network, str):\n network = ipaddress.ip_network(network)\n\n host = network[index]\n return ipaddress.ip_interface(f\"{host}/{network.prefixlen}\")",
"def get_netiface():\n ip = mu.get_ip()\n for interface in netifaces.interfaces():\n addrs = netifaces.ifaddresses(interface)\n if netifaces.AF_INET in addrs.keys():\n i_addr = addrs[netifaces.AF_INET][0]['addr']\n if i_addr == ip:\n return interface\n\n # Return None if no interface found\n return None",
"def getNodeInterface(self,node,interface):\n data = self.connect('get','nodes/%s/network/%s' % (node,interface),None)\n return data",
"def get_network_interface(\n name=None,\n network_interface_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n r = {}\n result = _get_network_interface(conn, name, network_interface_id)\n if \"error\" in result:\n if result[\"error\"][\"message\"] == \"No ENIs found.\":\n r[\"result\"] = None\n return r\n return result\n eni = result[\"result\"]\n r[\"result\"] = _describe_network_interface(eni)\n return r",
"def get_first_network_interface_matching(self, predicate):\n for network in self.raw_vm.network:\n if predicate(network):\n return network\n return None",
"def get_ethernet_interface(server_profile_uuid, eth_id):\n profile = g.oneview_client.server_profiles.get(server_profile_uuid)\n\n connections = profile[\"connectionSettings\"][\"connections\"]\n\n connection = None\n for conn in connections:\n if str(conn[\"id\"]) == str(eth_id):\n connection = conn\n break\n\n if connection is None:\n abort(status.HTTP_404_NOT_FOUND, \"EthernetInterface {} not found\"\n .format(eth_id))\n\n network_attrs = g.oneview_client.index_resources\\\n .get(connection[\"networkUri\"])\n\n ethernet = EthernetInterface.build(profile, connection, network_attrs)\n\n return ResponseBuilder.success(ethernet)",
"def privateInterface(self):\n\t\t\n\t\t# I wrote the Network module in C just for this purpose. The\n\t\t# netcmp() function works like strcmp(), where 0 means args are\n\t\t# the same.\n\n\t\tintfs = gmon.Network.interfaces()\n\n\t\tif self.master_network:\n\t\t\ttarget_net = \"%s/%s\" % \\\n\t\t\t\t(self.master_network, self.master_netmask)\n\t\t\tfor i in intfs:\n\t\t\t\tif not gmon.Network.netcmp(intfs[i], target_net):\n\t\t\t\t\treturn i\n\n\t\t# We have no hint, check default.\n\t\t\n\t\tif 'eth0' in intfs:\n\t\t\treturn 'eth0'\n\t\t\t\n\t\traise Exception, \"Your private interface (eth0) is down\"",
"def get_cellular_network_interface(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def network_interface_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_interface_id\")",
"def network_interface(self): \n return self._network_interface",
"def get_vm_nic(vm, nic_id):\n try:\n nic_id = int(nic_id)\n return vm.nics.get(id=nic_id)\n except NetworkInterface.DoesNotExist:\n raise faults.ItemNotFound(\"NIC '%s' not found\" % nic_id)\n except (ValueError, TypeError):\n raise faults.BadRequest(\"Invalid NIC ID '%s'\" % nic_id)",
"def getInterface(self, iTag):\r\n try:\r\n return self._interfaces[iTag]\r\n except KeyError:\r\n raise InvalidRequest('Can not get a non existent interface '\r\n \"'{0}' from the container.\".format(iTag))",
"def get_network_interface_ip_address(interface):\n while True:\n if interface not in ni.interfaces():\n logger.error('Could not find interface %s.' % (interface,))\n exit(1)\n interface = ni.ifaddresses(interface)\n if (2 not in interface) or (len(interface[2]) == 0):\n logger.warning('Could not find IP of interface %s. Sleeping.' % (interface,))\n sleep(60)\n continue\n return interface[2][0]['addr']",
"def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None",
"def GetNetwork(self, network, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n \"/%s/networks/%s\" % (GANETI_RAPI_VERSION, network),\n query, None)",
"def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers",
"def SelectInterface(self, interface=None):\n # Check that we have an online WLAN interface.\n interfaces = self.GetInterfaces()\n\n # Ensure there are WLAN interfaces available.\n if not interfaces:\n raise WiFiError('No available WLAN interfaces.')\n\n # If a specific interface is specified, check that it exists.\n if interface:\n if interface not in interfaces:\n raise WiFiError('Specified interface %s not available' %\n interface)\n return interface\n\n # If no interface is specified, check the uniqueness.\n if len(interfaces) != 1:\n raise WiFiError(\n 'There are multiple interfaces. '\n 'Please specify one from: %r' % interfaces)\n return interfaces[0]",
"def get_physnet(self, port, iface_name, introspection_data):",
"def get_network(network_id, user_id, for_update=False, non_deleted=False):\n\n try:\n network_id = int(network_id)\n objects = Network.objects\n if for_update:\n objects = objects.select_for_update()\n network = objects.get(Q(userid=user_id) | Q(public=True),\n id=network_id)\n if non_deleted and network.deleted:\n raise faults.BadRequest(\"Network has been deleted.\")\n return network\n except (ValueError, TypeError):\n raise faults.BadRequest(\"Invalid network ID '%s'\" % network_id)\n except Network.DoesNotExist:\n raise faults.ItemNotFound('Network %s not found.' % network_id)",
"def get_interface(self, ifname):\n real_ifname = self.generic_to_host(ifname)\n retval = _parse_interface(self.do('addr', 'show', real_ifname))\n retval.ifname = ifname\n return retval",
"def get_fip_router_interface(self, net_name):\n nets = self.neutron.list_networks(tenant_id=self.project_id, name=net_name)\n try:\n net_id = nets['networks'][0]['id']\n except (IndexError, KeyError) as e:\n msg = \"No network found with name %s!\" % net_name\n logger.error(' '.join([msg, \"ERROR:\", str(e)]))\n raise VIMAgentsException(ERROR, msg)\n\n ports = self.neutron.list_ports(device_owner=\"network:router_interface\",\n tenant_id=self.project_id,\n network_id=net_id)\n # What is the best way to find out the port id when there is more than one router\n # sharing the same network in a same project\n try:\n port_id = ports['ports'][0]['id']\n except (IndexError, KeyError) as e:\n msg = \"No router port configured in network %s!\" % net_name\n logger.error(' '.join([msg, \"ERROR:\", str(e)]))\n raise VIMAgentsException(ERROR, msg)\n\n return port_id",
"def get_network(self, network_id):\n url = '%s/v2.0/networks/%s' % (self.catalog['network'], network_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['network']\n else:\n LOG.error('Get network failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def get_network_with_name(self, name):\n for network in self.networks:\n if network.name == name:\n return network\n return None",
"def getInterface(self, iTag):\r\n try:\r\n return self._interfaces[iTag]\r\n except KeyError:\r\n raise InvalidRequest('Can not get a non existent interface '\r\n \"'{0}' from the robot.\".format(iTag))",
"def get_nic_driver(pci_id):\n driverlist = dict(zip(NICS.values(), DRIVERS.keys()))\n try:\n driver = DRIVERS[driverlist[pci_id]]\n except Exception as e:\n driver = None\n return driver",
"def get_network_interfaces(project_id, network_url, auto_assign_external_ip):\n network = network_url or get_network_url(project_id, 'default')\n network_interfaces = [{'network': network}]\n if auto_assign_external_ip:\n # This creates a single accessConfig instance and uses default values for\n # all fields to enable external network with auto-assigned IP.\n network_interfaces[0]['accessConfigs'] = [{'type': 'ONE_TO_ONE_NAT'}]\n return network_interfaces",
"def _get_network_by(self, field, search):\n for network in self.mobile_networks:\n f = network.get(field)\n if f is not None:\n if f == search:\n return network\n return None",
"def GetInterface(interface, dummy_interface=DummyInterface, is_dummy=False):\n return dummy_interface if is_dummy else interface",
"def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:\n return pulumi.get(self, \"network_interfaces\")",
"def get_logical_ifname(self, interface_name, proto='provision'): # pragma: no cover\n output = check_output(['uci', 'show', 'network'])\n network_list = output.strip().split('\\n')\n for config in network_list:\n cfg, option = config.split('=')\n net_prex = cfg.split(\".\")\n if net_prex[-1] == \"proto\" and str(option) != proto:\n ifname = '.'.join(net_prex[:-1]) + '.ifname'\n interface = check_output(['uci', 'get', ifname]).split('\\n')[0]\n if interface == interface_name:\n return net_prex[1]\n return ''"
]
| [
"0.6707713",
"0.6606925",
"0.61241245",
"0.60492706",
"0.5958547",
"0.5794652",
"0.55923915",
"0.5566157",
"0.5558983",
"0.55508775",
"0.5550377",
"0.54599",
"0.54525834",
"0.5438885",
"0.5434333",
"0.53795886",
"0.5368822",
"0.5366266",
"0.53524244",
"0.5328939",
"0.5320066",
"0.5317045",
"0.53062063",
"0.526514",
"0.5256212",
"0.5251359",
"0.5248401",
"0.52453643",
"0.5193488",
"0.5190743"
]
| 0.7778597 | 0 |
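Aside: the get_interface_by_net_name document above follows a join-then-fall-back query shape. The sketch below reproduces that shape in isolation with hypothetical toy models (NIC, Bond, Network) and an in-memory SQLite database; it assumes SQLAlchemy 1.4+ and is not the real Nailgun schema.

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class Network(Base):
    __tablename__ = "networks"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    nic_id = Column(Integer, ForeignKey("nics.id"))
    bond_id = Column(Integer, ForeignKey("bonds.id"))

class NIC(Base):
    __tablename__ = "nics"
    id = Column(Integer, primary_key=True)
    node_id = Column(Integer)
    assigned_networks = relationship(Network, foreign_keys=[Network.nic_id])

class Bond(Base):
    __tablename__ = "bonds"
    id = Column(Integer, primary_key=True)
    node_id = Column(Integer)
    assigned_networks = relationship(Network, foreign_keys=[Network.bond_id])

def get_interface_by_net_name(session, node_id, netname):
    # Try NIC-level interfaces first ...
    nic = (
        session.query(NIC)
        .join(NIC.assigned_networks)
        .filter(Network.name == netname, NIC.node_id == node_id)
        .first()
    )
    if nic is not None:
        return nic
    # ... then fall back to bond interfaces carrying the same network.
    return (
        session.query(Bond)
        .join(Bond.assigned_networks)
        .filter(Network.name == netname, Bond.node_id == node_id)
        .first()
    )

if __name__ == "__main__":
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Bond(id=1, node_id=7))
    session.add(Network(id=1, name="management", bond_id=1))
    session.commit()
    print(get_interface_by_net_name(session, 7, "management"))  # falls back to the Bond row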
Find all interfaces with MAC address not in mac_addresses. | def get_interfaces_not_in_mac_list(cls, node_id, mac_addresses):
return db().query(models.NodeNICInterface).filter(
models.NodeNICInterface.node_id == node_id
).filter(
not_(models.NodeNICInterface.mac.in_(mac_addresses))
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def possible_mac_addresses(interface):\n\n mac_addrs = []\n\n # In case of VLANs, just grab the parent interface\n if interface.interface_type == 'vlan':\n interface = interface.parent\n\n # Bonding/bridge: append the MACs of the physical interfaces\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n for slave in interface.all_slaves():\n if slave.mac and (slave.interface_type != \"public\" or slave.bootable):\n mac_addrs.append(slave.mac)\n\n # Handle physical interfaces, and bonding with a dedicated MAC\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n if interface.mac and (interface.interface_type != \"public\" or interface.bootable):\n mac_addrs.append(interface.mac)\n\n return mac_addrs",
"def getmacaddrs():\n # Unpack just for the sake of being meaningful.\n ifaddrs, sockaddr_dl, sockaddr = PLATFORM_LOOKUP[PLATFORM]\n ptr = c_void_p(None)\n result = LIBC.getifaddrs(pointer(ptr))\n if result != 0:\n return {}\n ifa = ifaddrs.from_address(ptr.value)\n result = {}\n\n while True:\n name = ifa.ifa_name\n if name not in result:\n result[name] = []\n # Some interface (such as a TUN virtual network) doesn't give us\n # ifa_addr at all and we can usually skip them because they're hardly\n # relevant for our usage case.\n if ifa.ifa_addr:\n sa = sockaddr.from_address(ifa.ifa_addr)\n if sa.sa_family == AF_LINK:\n si = sockaddr_dl.from_address(ifa.ifa_addr)\n addr = \"%s\" % si\n if addr:\n result[name].append(addr)\n if ifa.ifa_next:\n ifa = ifaddrs.from_address(ifa.ifa_next)\n else:\n break\n\n LIBC.freeifaddrs(ptr)\n return result",
"def fetch():\n\t\n\t_interfaces = [Interface(iface) for iface in netifaces.interfaces()]\n\t\n\tfor iface in _interfaces: \n\t\tif (iface.id in BLACK_ID) or (iface.mac in BLACK_MAC) or (len(iface.mac) < 5):\n\t\t\t_interfaces.remove(iface)\n\t\t\t\n\treturn _interfaces",
"def get_my_mac_set(iface_filter=None):\n\n out_set = set()\n if sys.platform.startswith(\"win\"):\n from scapy.arch.windows import NetworkInterface\n if type(iface_filter) == NetworkInterface:\n out_set.add(iface_filter.mac)\n\n for iface in sc.get_if_list():\n if iface_filter is not None and iface != iface_filter:\n continue\n try:\n mac = sc.get_if_hwaddr(iface)\n except Exception as e:\n continue\n else:\n out_set.add(mac)\n\n return out_set",
"def remove_interfaces(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n\n tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']\n nat_config = {\"nat_zone\": \"0\"}\n\n for table_name in tables:\n table_dict = config_db.get_table(table_name)\n if table_dict:\n for table_key_name in table_dict:\n if isinstance(table_key_name, str) is False:\n continue\n\n config_db.set_entry(table_name, table_key_name, nat_config)",
"def all_interfaces():\n max_possible = 128 # arbitrary. raise if needed.\n number_of_bytes = max_possible * 32\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n names = array.array('B', '\\0' * number_of_bytes)\n outbytes = struct.unpack('iL', fcntl.ioctl(\n s.fileno(),\n 0x8912, # SIOCGIFCONF\n struct.pack('iL', number_of_bytes, names.buffer_info()[0])\n ))[0]\n namestr = names.tostring()\n interfaces = {}\n\n for i in range(0, outbytes, 40):\n name = namestr[i:i+16].split('\\0', 1)[0]\n ip = namestr[i+20:i+24]\n interfaces[name] = format_ip(ip)\n return interfaces",
"def _remove_ifaces(self, ipdb, ifnames, netns='host'):\n for ifname in ifnames:\n if ifname in ipdb.interfaces:\n LOG.warning('Found hanging interface %(ifname)s inside '\n '%(netns)s netns. Most likely it is a leftover '\n 'from a kuryr-daemon restart. Trying to delete '\n 'it.', {'ifname': ifname, 'netns': netns})\n with ipdb.interfaces[ifname] as iface:\n iface.remove()",
"def get_broadcast_addresses():\n addr_list = []\n if HAS_NETIFACES:\n for iface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(iface).get(netifaces.AF_INET)\n if addresses is None:\n continue\n for address in addresses:\n broadcast_addr = address.get(\"broadcast\")\n if broadcast_addr is None:\n continue\n addr_list.append(broadcast_addr)\n return [\"127.0.0.1\", \"255.255.255.255\", \"<broadcast>\"] + addr_list",
"def network_interfaces():\n try:\n command = which('ipadm')\n args = ('show-addr', '-p', '-o', 'STATE,ADDR')\n pattern = r'ok:(\\d+\\.\\d+\\.\\d+\\.\\d+)'\n except CommandMissing:\n # Fall back to old command on old solaris releases.\n command = which('/usr/sbin/ifconfig')\n args = ('-a')\n pattern = r'inet (\\d+\\.\\d+\\.\\d+\\.\\d+)'\n addrs = []\n output = sh(command, *args)\n for line in output:\n match = re.match(pattern, line)\n if match:\n addr = match.group(1)\n if not addr.startswith(\"127.\"):\n addrs.append(addr)\n return addrs",
"def get_interface_broadcast_addresses(self):\n broadcast_addresses = []\n ip_interfaces = self.get_interfaces_ip()\n for k, v in ip_interfaces.items():\n if 'ipv4' in v:\n ipv4_address_info = ip_interfaces[k]['ipv4']\n ip_addresses = ipv4_address_info.keys()\n for ip_address in ip_addresses:\n netmask = ip_interfaces[k]['ipv4'][ip_address]['prefix_length']\n ipv4_address = ipaddress.ip_interface(\"{}/{}\".format(ip_address, netmask))\n network = ipv4_address.network\n broadcast_addresses.append(str(network.broadcast_address))\n if 'ipv6' in v:\n ipv4_address_info = ip_interfaces[k]['ipv6']\n ip_addresses = ipv4_address_info.keys()\n for ip_address in ip_addresses:\n netmask = ip_interfaces[k]['ipv6'][ip_address]['prefix_length']\n ipv4_address = ipaddress.ip_interface(\"{}/{}\".format(ip_address, netmask))\n network = ipv4_address.network\n broadcast_addresses.append(str(network.broadcast_address))\n return broadcast_addresses",
"def net_if_addrs():\n ret = []\n for items in cext.net_if_addrs():\n items = list(items)\n items[0] = py2_strencode(items[0])\n ret.append(items)\n return ret",
"def list_all_sys_net_if():\n sys_net_path = glob.glob('/sys/class/net/*')\n # Now remove the /sys/class/net prefix, keep only the interface name\n p = re.compile('^/sys/class/net/')\n result = [ p.sub('', s) for s in sys_net_path ]\n \n return result",
"def scan_devices(self):\n self._update_info()\n return [client[\"mac\"] for client in self.last_results]",
"def __get_network_interface_info(self):\n iface_list = []\n for i in netifaces.interfaces():\n addr = netifaces.ifaddresses(i)\n\n\n # clumsy way to filter which interfaces get added to list. If these elements raise KeyErrors, we skip\n try:\n iface_list.append( {i : { \n 'ip_address' : addr[netifaces.AF_INET][0]['addr'],\n 'mac' : addr[netifaces.AF_LINK][0]['addr']\n }})\n except KeyError,e:\n\t pass\n self.print_debug(\"Key not found - _get_network_interface_info - {0}\".format(addr))\n\n return iface_list",
"def getLocalInterfaces():\n SIOCGIFCONF = 0x8912\n MAXBYTES = 8096\n \n var1 = 32\n var2 = 32\n \n sock = socket(AF_INET, SOCK_DGRAM)\n names = array('B', '\\0' * MAXBYTES)\n outbytes = unpack('iL', ioctl(sock.fileno(), SIOCGIFCONF, pack('iL', MAXBYTES, names.buffer_info()[0]) ))[0]\n \n namestr = names.tostring()\n \n return [(namestr[i:i+var1].split('\\0', 1)[0], inet_ntoa(namestr[i+20:i+24])) for i in xrange(0, outbytes, var2)]",
"def scan_devices(self):\n self._update_info()\n\n return [client['mac'] for client in self.last_results]",
"def keepAddresses(networkItems_):\n for i in networkItems_[:]:\n try:\n ip = netaddr.IPAddress(i)\n except:\n networkItems_.remove(i)\n return networkItems_",
"def get_illegal_source_addresses(sessions, subnets):\n\n # Don't care about login source address\n if not subnets:\n return []\n\n subnets = [netaddr.IPNetwork(x) for x in subnets]\n\n illegal_address = []\n\n for session in sessions:\n user = sessions[session]['user']\n source = sessions[session]['source']\n\n # If the login has a source (e.g. 192.168.0.1, 192.168.0.2:S.1, :0)\n # Extract anything that looks like an IPv4 address\n matches = source and re.search(r'(\\d{1,3}\\.){3}\\d{1,3}', source)\n address = matches and matches.group(0)\n if not address:\n continue\n addr = netaddr.IPAddress(address)\n\n # Compare with the first and last addresses in the subnet\n # recording if it is contained within any defined allowed range\n if not [s for s in subnets if addr in s]:\n illegal_address.append(session + ':' + user + ':' + address)\n\n return illegal_address",
"def get_all_interfaces():\n global all_interfaces\n if all_interfaces:\n return all_interfaces\n\n f = open('/proc/net/dev','r')\n ifacelist = f.read().split('\\n')\n f.close()\n\n # remove 2 lines header\n ifacelist.pop(0)\n ifacelist.pop(0)\n\n all_interfaces = {}\n # loop to check each line\n for line in ifacelist:\n\n ifacedata = line.replace(' ','').split(':')\n\n # check the data have 2 elements\n if len(ifacedata) == 2:\n all_interfaces[ifacedata[0]] = get_interface_ip(ifacedata[0])\n\n return all_interfaces",
"def get_devices(mac=None):\n wemo_devices = discover_wemo()\n\n if mac:\n dev = get_device(mac, wemo_devices)\n if not dev:\n return []\n return [dev]\n\n return wemo_devices",
"def get_agent_network_interfaces(self):\n iface_list = [iface.serialize()['name'] for iface in\n hardware.dispatch_to_managers('list_network_interfaces')]\n iface_list = [name for name in iface_list if 'lo' not in name]\n\n if len(iface_list) == 0:\n raise errors.LookupAgentInterfaceError('Agent could not find a '\n 'valid network interface.')\n else:\n return iface_list",
"def exclude_auditor_emails(emails):\n acl = all_models.AccessControlList\n acr = all_models.AccessControlRole\n acp = all_models.AccessControlPerson\n\n if not isinstance(emails, set):\n emails = set(emails)\n\n auditor_emails = db.session.query(\n all_models.Person.email\n ).join(\n acp\n ).join(\n acl\n ).join(\n acr\n ).filter(\n acr.name == \"Auditors\",\n all_models.Person.email.in_(emails)\n ).distinct().all()\n\n emails_to_exlude = {line.email for line in auditor_emails}\n return emails - emails_to_exlude",
"def _get_default_interfaces() -> set:\n interfaces, _, _ = utils.run_command(r\"ip -4 route | egrep '^default ' | awk '{print $5}'\", shell=True)\n return set(interfaces.strip().split())",
"def search_mac_in_arp(args):\n arp_table = get_arp_table()\n \n mac = convert_mac(args.mac, 2)\n columns = ['IP', 'AGE(min)', 'MAC', 'INTERFACE', 'VENDOR']\n result = []\n for row in arp_table.split('\\n'):\n if mac in row:\n lists = []\n _,ip,age,r_mac,_,interface = row.strip().split()\n r_mac = convert_mac(mac, 1)\n vendor = convert_mac(r_mac, 3)\n vendor = find_in_database(vendor)\n r_mac = blue + r_mac + endblue\n lists = [ip, age, r_mac, interface, vendor]\n result.append(lists)\n else:\n pass\n print(tabulate(result, headers=columns))",
"def __get_scanning_range(self):\n if self.__network is not None:\n return [self.__network]\n networks = []\n interfaces = netifaces.interfaces()\n for data in interfaces:\n ips = netifaces.ifaddresses(data)\n for key, interface_data in ips.items():\n for item in interface_data:\n if item.get(\"netmask\", None) is not None and \\\n item.get(\"addr\", None) is not None and \\\n self.is_legal_ip(item[\"netmask\"]):\n if item.get(\"addr\") not in [\"127.0.0.1\", \"0.0.0.0\"]:\n network = \"{ip}/{cird}\".format(ip=item[\"addr\"],\n cird=IPAddress(item[\"netmask\"]).netmask_bits())\n if network not in networks:\n networks.append(network)\n return networks",
"def get_mac():\n\n interface = [x for x in netifaces.interfaces() if 'wlan' in x or 'wlp' in x][0]\n return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']",
"def _get_interface_ip(mac_addr):\n interfaces = netifaces.interfaces()\n for iface in interfaces:\n addresses = netifaces.ifaddresses(iface)\n link_addresses = addresses.get(netifaces.AF_LINK, [])\n for link_addr in link_addresses:\n if link_addr.get('addr') == mac_addr:\n ip_addresses = addresses.get(netifaces.AF_INET)\n if ip_addresses:\n # NOTE: return first address, ironic API does not\n # support multiple\n return ip_addresses[0].get('addr')\n else:\n break",
"def _get_interface_ip(mac_addr):\n interfaces = netifaces.interfaces()\n for iface in interfaces:\n addresses = netifaces.ifaddresses(iface)\n link_addresses = addresses.get(netifaces.AF_LINK, [])\n for link_addr in link_addresses:\n if link_addr.get('addr') == mac_addr:\n ip_addresses = addresses.get(netifaces.AF_INET)\n if ip_addresses:\n # NOTE: return first address, ironic API does not\n # support multiple\n return ip_addresses[0].get('addr')\n else:\n break",
"def blacklist_ips(self):\r\n if self.blacklist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.blacklist.split(',')]) # pylint: disable=no-member\r",
"def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()"
]
| [
"0.61820143",
"0.60239303",
"0.5998274",
"0.5552142",
"0.54893565",
"0.53755784",
"0.534444",
"0.5340872",
"0.5331961",
"0.5222017",
"0.5130869",
"0.51166475",
"0.50925165",
"0.5075239",
"0.50387186",
"0.5014326",
"0.49901056",
"0.49875855",
"0.49597886",
"0.49526137",
"0.49336508",
"0.49316338",
"0.49303615",
"0.4921452",
"0.49116528",
"0.49089804",
"0.49057844",
"0.49057844",
"0.4875981",
"0.4845548"
]
| 0.8007386 | 0 |
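Aside: the not_(...in_(...)) filter in get_interfaces_not_in_mac_list above can be exercised against a throwaway table. The following is a minimal SQLAlchemy Core sketch (1.4+ style select(); the table and column names are illustrative only, not the real schema).

from sqlalchemy import (Column, Integer, MetaData, String, Table, create_engine,
                        not_, select)

metadata = MetaData()
nics = Table(
    "node_nic_interfaces", metadata,
    Column("id", Integer, primary_key=True),
    Column("node_id", Integer),
    Column("mac", String),
)

engine = create_engine("sqlite://")
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(nics.insert(), [
        {"node_id": 1, "mac": "aa:bb:cc:dd:ee:01"},
        {"node_id": 1, "mac": "aa:bb:cc:dd:ee:02"},
        {"node_id": 2, "mac": "aa:bb:cc:dd:ee:03"},
    ])
    known_macs = ["aa:bb:cc:dd:ee:01"]
    # Keep node 1's interfaces whose MAC is NOT in the known list.
    stmt = (
        select(nics.c.mac)
        .where(nics.c.node_id == 1)
        .where(not_(nics.c.mac.in_(known_macs)))
    )
    print([row.mac for row in conn.execute(stmt)])  # ['aa:bb:cc:dd:ee:02']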
Takes a hypothesis (string) and a list of rules (list of IF objects), returning an AND/OR tree representing the backchain of possible statements we may need to test to determine if this hypothesis is reachable or not. This method should return an AND/OR tree, that is, an AND or OR object, whose constituents are the subgoals that need to be tested. The leaves of this tree should be strings (possibly with unbound variables), not AND or OR objects. Make sure to use simplify(...) to flatten trees where appropriate. | def backchain_to_goal_tree(rules, hypothesis):
goal_tree = []
for rule in rules:
var = match(rule.consequent(),hypothesis)
if var:
sub_hypothesis = populate(rule.antecedent(), var)
if isinstance(rule.antecedent(), OR):
sub_tree = [backchain_to_goal_tree(rules, antecedent) for antecedent in sub_hypothesis]
goal_tree.append(OR(sub_tree))
elif isinstance(rule.antecedent(), AND):
sub_tree = [backchain_to_goal_tree(rules, antecedent) for antecedent in sub_hypothesis]
goal_tree.append(AND(sub_tree))
else:
goal_tree.append(backchain_to_goal_tree(rules, sub_hypothesis))
    return simplify(OR([hypothesis] + goal_tree)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tree_build(sv, piece):\r\n if piece==None: return None \r\n # process various string expressions (or triplets without args for conditions and values)\r\n piece=piece[0].strip(Space) if type(piece)==tuple else piece.strip(Space) # convert to string \r\n alphabetic=Alphakwords+sv.Object_list\r\n \r\n # empty expression\r\n if not piece: return None\r\n\r\n # a string between quotes\r\n if piece[0]==Quote and piece[-1]==Quote: return (piece, None, None) # return string as a leaf\r\n \r\n # a protected string: restore without further parsing \r\n key=piece.strip(Special) \r\n if key in sv.Strings: return (Quote+sv.Strings[key]+Quote, None, None) # return string as a leaf\r\n\r\n # a bracketed expression: parse from outer ones on, RECURSIVE\r\n if key in sv.Blocks: return (Obr, tree_build(sv, sv.Blocks[key]), None)\r\n\r\n piece=save_bracketed(sv, piece) # protect outer bracketed expressions from parsing\r\n piece=Space+piece+Space # add Spaces to help detect alphabetic keys \r\n \r\n # PARSE by operator priority and descending order of position \r\n for op_group in Priority_groups+[sv.Object_list]: # ops by priority groups\r\n op_list=find_op(sv, piece, op_group, alphabetic) # detect operators of this group\r\n\r\n for o, op in op_list: # found ops from this group in reverse order of occurrence\r\n\r\n # process comma operator \r\n if o==Comma and o in piece: return make_list(sv, piece) # list will be linear (not a tree). Build RECURSIVE \r\n\r\n # process unary functions and defined objects (all unary operators are alphabetic)\r\n if o in Unary or o in sv.Object: # unary operators (non space-delimited)\r\n if piece.startswith(op): # operator must be at the start (space-delimited)\r\n res=make_unary(sv, piece, o, op)\r\n if res and (not res[1] or o in [Begin, End]):\r\n return special_unary(sv, res) # process special case \r\n return res\r\n \r\n # process binary operators (always lower priority than unary). Build RECURSIVE\r\n elif op in piece:\r\n res=make_binary(sv, piece, o, op) # binary operators (space-delimited)\r\n if res and (not res[1] or o==Isnot):\r\n return special_binary(sv, res) # process special case \r\n return res\r\n\r\n # process other (args and doubly) subscripted objects. Build RECURSIVE\r\n piece=piece.strip(Space)\r\n if Special+Bloc in piece: return make_subscripted(sv, piece) # the object is subscripted / has args\r\n\r\n # when all operators have been processed, only leaves remain\r\n return make_leaf(sv, piece)",
"def resolve(self, nonempty_universe=True, debug=False, **kwargs):\n kwargs[\"nonempty_universe\"], kwargs[\"debug\"] = nonempty_universe, debug\n resolved = super(And, self).resolve(**kwargs)\n if not isinstance(resolved, And):\n log_simplification(self, resolved, **kwargs)\n return resolved\n clauses = (list)(resolved.ands)\n quantifiers = []\n if not nonempty_universe and not self.admits_empty_universe():\n blank = Exists.blank(top)\n (quantifiers.append)(blank.change_elem)\n kwargs = (blank.inner_kwargs)(kwargs)\n prev_clause_len = 1\n while prev_clause_len < len(clauses):\n prev_clause_len = len(clauses)\n# reversed ensures conclusions get tested first\n for i in (reversed)(range(1, len(clauses))):\n x = clauses[i]\n for y in clauses[:i + 1]: # allow resolution of a clause against itself\n resolution = x.resolve_against(y)\n if resolution is not None:\n resolution = resolution.simplify(dnf=False, **kwargs)\n if debug:\n print(x, \"+\", y, \"=>\", resolution)\n new_quantifiers = []\n inner_kwargs = kwargs\n while isinstance(resolution, Quantifier) and self.can_prenex(resolution, **kwargs):\n (new_quantifiers.append)(resolution.change_elem)\n inner_kwargs = (resolution.inner_kwargs)(inner_kwargs)\n resolution = resolution.elem\n if isinstance(resolution, And):\n new_clauses = resolution.ands\n else:\n new_clauses = (resolution,)\n novel = False\n for new_clause in new_clauses:\n if new_clause == bot:\n clauses = [bot]\n novel = True\n break\n elif new_clause != top and new_clause not in clauses:\n clauses.append(new_clause)\n novel = True\n if novel:\n quantifiers.extend(new_quantifiers)\n kwargs = inner_kwargs\n if clauses == [bot]:\n break\n if clauses == [bot]:\n break\n resolved = reduce(_coconut_pipe, [And(*clauses)] + quantifiers)\n log_simplification(self, resolved, **kwargs)\n return _coconut_tail_call(resolved.simplify, dnf=False, **kwargs)",
"def conjuncts(s):\n return dissociate(\"AND\", s)",
"def _check_criteria(dep, dep_obj, all_related, edge, sibling_idx):\n # Check for a matching dependency type\n related = []\n\n if edge[2][\"dep\"] == dep:\n # Check for matching POS type(s)\n for pos_logic in dep_obj.keys():\n connector = None\n\n if isinstance(dep_obj[pos_logic], dict):\n for pos in dep_obj[pos_logic].keys():\n\n # Check for allowed part of speech tags in matched dependency patterns\n if (pos_logic == \"pos_in\" and pos in G.nodes[sibling_idx][\"pos\"]) or (\n pos_logic == \"pos_equals\" and pos == G.nodes[sibling_idx][\"pos\"]):\n pass\n elif pos_logic == \"pos_not\":\n if not [False if not_pos == G.nodes[sibling_idx][\"pos\"] else True for not_pos in\n dep_obj.keys()]: continue\n else:\n continue\n\n # if no additional checks, have a match\n if dep_obj[pos_logic][pos] == None or any(\n y in dep_obj[pos_logic][pos] for y in [None, \"add_sibling\"]):\n all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,\n A.index_lookup[G.nodes[sibling_idx]['word']])\n\n # if additional checks are required, process further\n if dep_obj[pos_logic][pos]:\n if \"get_cousin\" in dep_obj[pos_logic][pos]:\n related.extend(_get_cousin(sibling_idx, dep_obj[pos_logic][pos][\"get_cousin\"]))\n connector = G.nodes[sibling_idx]['word']\n\n if \"special\" in dep_obj[pos_logic][pos]:\n if dep == \"compound\" and pos == \"NN\":\n related = [G.nodes[sibling_idx]['word']]\n\n if None in related:\n related.remove(None)\n\n # Allows for getting cousin and returning sibling\n if \"else\" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos][\"else\"] == \"always\":\n all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,\n A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)\n if len(related) > 0 and isinstance(related, list):\n for x in related:\n if x != None:\n all_related = _add_related(x, dep, all_related, A.index_lookup[x],\n connector=connector)\n elif \"else\" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos][\"else\"] == True:\n all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,\n A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)\n\n return all_related",
"def parse_conditions(\n operand_builder: Callable[[Any, ColumnSet, Set[str]], TExpression],\n and_builder: Callable[[Sequence[TExpression]], Optional[TExpression]],\n or_builder: Callable[[Sequence[TExpression]], Optional[TExpression]],\n unpack_array_condition_builder: Callable[[TExpression, str, Any], TExpression],\n simple_condition_builder: Callable[[TExpression, str, Any], TExpression],\n entity: Entity,\n conditions: Any,\n arrayjoin_cols: Set[str],\n depth: int = 0,\n) -> Optional[TExpression]:\n from snuba.clickhouse.columns import Array\n\n if not conditions:\n return None\n\n if depth == 0:\n parsed = [\n parse_conditions(\n operand_builder,\n and_builder,\n or_builder,\n unpack_array_condition_builder,\n simple_condition_builder,\n entity,\n cond,\n arrayjoin_cols,\n depth + 1,\n )\n for cond in conditions\n ]\n return and_builder([c for c in parsed if c])\n elif is_condition(conditions):\n try:\n lhs, op, lit = conditions\n except Exception as cause:\n raise ParsingException(f\"Cannot process condition {conditions}\") from cause\n\n # facilitate deduping IN conditions by sorting them.\n if op in (\"IN\", \"NOT IN\") and isinstance(lit, tuple):\n lit = tuple(sorted(lit))\n\n # If the LHS is a simple column name that refers to an array column\n # (and we are not arrayJoining on that column, which would make it\n # scalar again) and the RHS is a scalar value, we assume that the user\n # actually means to check if any (or all) items in the array match the\n # predicate, so we return an `any(x == value for x in array_column)`\n # type expression. We assume that operators looking for a specific value\n # (IN, =, LIKE) are looking for rows where any array value matches, and\n # exclusionary operators (NOT IN, NOT LIKE, !=) are looking for rows\n # where all elements match (eg. all NOT LIKE 'foo').\n columns = entity.get_data_model()\n if (\n isinstance(lhs, str)\n and lhs in columns\n and isinstance(columns[lhs].type, Array)\n and columns[lhs].base_name not in arrayjoin_cols\n and columns[lhs].flattened not in arrayjoin_cols\n and not isinstance(lit, (list, tuple))\n ):\n return unpack_array_condition_builder(\n operand_builder(lhs, entity.get_data_model(), arrayjoin_cols), op, lit,\n )\n else:\n return simple_condition_builder(\n operand_builder(lhs, entity.get_data_model(), arrayjoin_cols), op, lit,\n )\n\n elif depth == 1:\n sub_expression = (\n parse_conditions(\n operand_builder,\n and_builder,\n or_builder,\n unpack_array_condition_builder,\n simple_condition_builder,\n entity,\n cond,\n arrayjoin_cols,\n depth + 1,\n )\n for cond in conditions\n )\n return or_builder([s for s in sub_expression if s])\n else:\n raise InvalidConditionException(str(conditions))",
"def parse_l1_logical_express(express):\n if express.find(\" AND \")!=-1 or express.find(\" NOT\")!=-1 or express.find(\"NOT \")!=-1 or express == \"L1GlobalDecision\":\n return []\n\n return express.split(\" OR \")",
"def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred",
"def test_expression_and_or(self):\n\n # Checks several examples with \"and\" and \"or\" operators\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\", \"multi_host\": False}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (1)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": True}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (2)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": False}], [\"networks\"]))\n self.assertFalse(value, \"complex expression (3)\")",
"def parse_logic(logic):\n\n ###print \"parse_logic(logic): logic:\",logic\n\n tokens = logic.split()\n\n # begin recursive logic parse\n return grammar_0(tokens)",
"def test_eval(self):\n # expr and expr\n base = abs_path('./specs/')\n ps = Parser(base + 'script3-6.py', base)\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 2)\n\n # expr or expr\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if or B == b1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 6)\n\n # expr and (expr or expr)\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if and (B == b1 or B == b2)\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing !=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a != if\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing >=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a.index >= 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing index\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b.index == 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with integer type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with float type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 1.5\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing unmade decision\n ps.spec['constraints'] = [{\"block\": \"A\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)\n\n # testing if the decision is made when the block depends on a variable\n # inside the block\n ps.spec['constraints'] = [{\"block\": \"B\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)",
"def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)",
"def parse_conditions_to_expr(\n expr: Sequence[Any], entity: Entity, arrayjoin: Set[str]\n) -> Optional[Expression]:\n\n def and_builder(expressions: Sequence[Expression]) -> Optional[Expression]:\n if not expressions:\n return None\n return combine_and_conditions(expressions)\n\n def or_builder(expressions: Sequence[Expression]) -> Optional[Expression]:\n if not expressions:\n return None\n return combine_or_conditions(expressions)\n\n def preprocess_literal(op: str, literal: Any) -> Expression:\n \"\"\"\n Replaces lists with a function call to tuple.\n \"\"\"\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. \"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)\n\n def unpack_array_condition_builder(\n lhs: Expression, op: str, literal: Any\n ) -> Expression:\n function_name = \"arrayExists\" if op in POSITIVE_OPERATORS else \"arrayAll\"\n\n # This is an expression like:\n # arrayExists(x -> assumeNotNull(notLike(x, rhs)), lhs)\n return FunctionCall(\n None,\n function_name,\n (\n Lambda(\n None,\n (\"x\",),\n FunctionCall(\n None,\n \"assumeNotNull\",\n (\n FunctionCall(\n None,\n OPERATOR_TO_FUNCTION[op],\n (Argument(None, \"x\"), preprocess_literal(op, literal)),\n ),\n ),\n ),\n ),\n lhs,\n ),\n )\n\n def simple_condition_builder(lhs: Expression, op: str, literal: Any) -> Expression:\n if op in UNARY_OPERATORS:\n if literal is not None:\n raise ParsingException(\n f\"Right hand side operand {literal} provided to unary operator {op}\",\n report=False,\n )\n return unary_condition(OPERATOR_TO_FUNCTION[op], lhs)\n\n else:\n if literal is None:\n raise ParsingException(\n f\"Missing right hand side operand for binary operator {op}\",\n report=False,\n )\n return binary_condition(\n OPERATOR_TO_FUNCTION[op], lhs, preprocess_literal(op, literal)\n )\n\n return parse_conditions(\n parse_expression,\n and_builder,\n or_builder,\n unpack_array_condition_builder,\n simple_condition_builder,\n entity,\n expr,\n arrayjoin,\n 0,\n )",
"def make_control_knowledge(self, horizon):\n\n \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n # ADD_RULE1_COUNT = 0\n # ADD_RULE2_COUNT = 0\n # ADD_RULE3_COUNT = 0\n\n close = list()\n far = list()\n\n for g in self.problem.goal:\n for p in self.problem.propositions:\n if re.match(r'at\\spackage\\d+\\scity\\d+-\\d+', str(p)):\n p_split = str(p).split()\n g_split = str(g).split()\n\n # if \"at\" and \"package[oo]\" match\n if p_split[0] == g_split[0] and p_split[1] == g_split[1]:\n # also \"city[oo]-[xx]\" match\n if p_split[2][:-2] == g_split[2][:-2]:\n close.append(p)\n else:\n far.append(p)\n\n # Rule 1:\n # ===============================\n # If a package is at its goal location, then it must remain there.\n # p@t and goal@t) -> p@t+1), where p is at(package, location)\n # cnf: not p@t or not goal@t or p@t+1\n\n for g in self.problem.goal:\n for t in range(0, horizon):\n clause = list()\n clause.append(-self.proposition_fluent_codes[(g, t)])\n clause.append(self.proposition_fluent_codes[(g, t + 1)])\n self.add_clause(clause, \"control\")\n # ADD_RULE1_COUNT += 1\n\n for t in range(0, horizon):\n for a in self.problem.actions:\n\n # Rule 2\n # ===============================\n\n # RULE\n # close -> do not load airplane\n # p1: close@t\n # p2: at the location of an airport @t\n # p3: airplane at this location @t\n # p4: plane is not loaded\n # a: load this airplane\n #\n # p1@t and p2@t and p3@t and p4@t => a@t\n # not p1@t or not p2@t or not p3@t or not p4@t or a@t\n # cnf: not p@t or not a@t\n if str(a).startswith('load-airplane'):\n for i in close:\n package = str(i).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(i, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE2_COUNT += 1\n\n # Rule 3\n # ===============================\n\n # RULE\n # far -> do not unload airplane\n # p@t -> not a@t, where p is far, a is unload-airplane\n # cnf: not p@t or not a@t\n if str(a).startswith('unload-airplane'):\n for j in far:\n package = str(j).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(j, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE3_COUNT += 1\n\n # # RULE\n # # if an airplane has a package on it and the package's\n # # destination is close do not fly this airplane.\n # # in fact, if the destination of package is far,\n # # fly this plane to it.\n # #\n # # p1: package on airplane @ t\n # # p2: package at a place @ t\n # # p3: the place and the goal are in the same city\n # # rule: p1@t and p2@t and p@3 => not fly plane@t\n # # and unload the plane@t\n #\n # # not p1@t or not p2@t or not fly@t\n # # not p1@t or not p2@t or unload\n #\n # # rule: p1@t and p2@t and not p3@t => fly plane@t and not\n # # unload the plane@t\n #\n # if str(a).startswith('fly-airplane'):\n # plane = str(a).split()[1]\n # # loc_from = str(a).split()[2]\n # for p1 in self.problem.propositions:\n # if str(p1).startswith('in package') and str(p1).split()[2] == plane: # in package plane\n # package = str(p1).split()[1]\n # for p2 in self.problem.propositions:\n # if p2 in close and str(p2).split()[1] == package: # at package location\n # clause = list()\n # clause.append(-self.proposition_fluent_codes[p1, t])\n # clause.append(-self.proposition_fluent_codes[p2, t])\n # clause.append(-self.action_fluent_codes[a, t])\n # self.add_clause(clause, 'control')\n # ADD_RULE2_COUNT += 
1\n #\n #\n # for g in self.problem.goal:\n # if str(g).split()[1] == package:\n # destination = str(g).split()[2]\n # for do in self.problem.actions:\n # # unload-airplane package00 plane00 city00-00\n # if str(do).startswith('unload') and str(do).split()[1] == package and str(do).split()[2] == plane and str(do).split()[3] == destination:\n # clause2 = list()\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p1, t])\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p2, t])\n # clause2.append(\n # self.action_fluent_codes[\n # do, t])\n # self.add_clause(clause2,\n # 'control')\n #\n # ADD_RULE3_COUNT += 1\n\n # RULE\n # if there is no package needs to be transferred at a location,\n # and the location has a truck\n # drive the truck to its airport\n\n # p1: (at package__ city__-__ /\\ (it is a goal)@t\n # p2: (at truck__ city__-__)@t\n # p3: (city__-__ is not airport)\n # not p1/\\p2/\\p3 => drive_truck_to_its_airport@t\n #\n #\n # CNF: p1 V not p2 V not p3 V drive_truck_to_its_airport@t\n # if str(a).startswith('DRIVE-TRUCK'):\n # for p1 in self.problem.goal:\n # city = str(p1).split()[2]\n # for p2 in self.problem.propositions:\n # if str(p2).startswith('at truck') and str(p2).split()[2] == city:\n # for p3 in self.problem.propositions:\n # if str(p3).startswith('airport') and str(p3).split()[1] == city:\n # clause = list()\n # clause.append(self.proposition_fluent_codes[(p1, t)])\n # clause.append(-self.proposition_fluent_codes[(p2, t)])\n # clause.append(-self.proposition_fluent_codes[(p3, t)])\n # clause.append(self.action_fluent_codes[(a, t)])\n # self.add_clause(clause, \"control\")\n\n # RULE\n # if there is an airplane is loaded with a package need\n # transfer (to another city), fly airplane to the corresponding\n # city.\n\n # p1: (at airplane__ city__-__)@t\n # p2: (in package__ airplane__)@t\n # p3: ( p2 is in far)\n # p1/\\p2/\\p3 => fly_airplane_to_its_airport@t\n #\n #\n # CNF: not p1@t V not p2@t V not p3@t V fly_plane_to_airport@t\n\n # print(\"ADDED RULE 1:\")\n # print(ADD_RULE1_COUNT)\n #\n # print(\"ADDED RULE 2:\")\n # print(ADD_RULE2_COUNT)\n #\n # print(\"ADDED RULE 3:\")\n # print(ADD_RULE3_COUNT)",
"def print_rules(tree, attribute_names, class_name, rule_string=\"IF\"):\n\n node_type = tree[0]\n if(node_type == \"Attribute\"):\n for i in range(2,len(tree)):\n attr_name = attribute_names[int(tree[1][3:])]\n val_name = tree[i][1]\n if(rule_string != \"IF\"):\n rule_string = rule_string + \" AND\"\n rule_string = rule_string + \" {} == {}\".format(attr_name,val_name)\n print_rules(tree[i][2],attribute_names,class_name,rule_string)\n try:\n index = rule_string.rindex(\" AND\")\n rule_string = rule_string[0:index]\n except:\n rule_string = \"IF\"\n else:\n rule_string = rule_string + \" THEN {} == {}\".format(class_name, tree[1])\n print(rule_string)",
"def create_automaton(formula, predicates):\n \n if \"--spot\" in sys.argv:\n spot = True\n else:\n spot = False\n if \"--rabit\" in sys.argv:\n rabit = True\n else:\n rabit = False\n \n stack=[]\n atom=[]\n first=True \n for element in formula:\n if element!=\")\":\n stack.append(element)\n else:\n atom.append(element)\n # pop everything to '(' and add to atom\n while(stack[-1]!=\"(\"):\n atom.append(stack.pop())\n atom.append(stack.pop())\n atom.reverse()\n error=False\n\n # user-defined predicates\n if atom[1] in predicates.keys():\n a = deepcopy(predicates[atom[1]][1])\n for i in range(predicates[atom[1]][0]):\n for j in range(len(a.transitions)):\n a.transitions[j][1] = a.transitions[j][1].replace(\"#\"+str(i+1), atom[i+2])\n new_alphabet = set()\n for symbol in a.alphabet:\n new = symbol.replace(\"#\"+str(i+1), atom[i+2])\n new_alphabet.add(new)\n a.alphabet = deepcopy(new_alphabet)\n\n # operations with automata\n elif atom[1]==\"exists\":\n if not (isinstance(atom[3], Automaton)):\n error=True\n else:\n a=exists(atom[2],atom[3])\n elif atom[1]==\"forall\":\n if not (isinstance(atom[3], Automaton)):\n error=True\n else:\n a = atom[3]\n if spot:\n a = spot_complement(a)\n else:\n a=comp2(a)\n a = exists(atom[2], a)\n if rabit:\n a = rabit_reduction(a)\n if spot:\n a = spot_complement(a)\n else:\n a=comp2(a)\n elif atom[1]==\"and\":\n if not (isinstance(atom[2], Automaton) and isinstance(atom[3], Automaton)):\n error=True\n else:\n a=intersection(atom[2],atom[3])\n elif atom[1]==\"or\":\n if not (isinstance(atom[2], Automaton) and isinstance(atom[3], Automaton)):\n error=True\n else:\n a=union(atom[2],atom[3])\n elif atom[1]==\"neg\":\n if not (isinstance(atom[2], Automaton)):\n error=True\n else:\n a = atom[2]\n if spot:\n a = spot_complement(a)\n else:\n a=comp2(a)\n elif atom[1]==\"implies\":\n if not (isinstance(atom[2], Automaton) and isinstance(atom[3], Automaton)):\n error=True\n else:\n a = atom[2]\n if spot:\n a = spot_complement(a)\n else:\n a = comp2(a)\n if rabit:\n a = rabit_reduction(a)\n a=union(a, atom[3])\n\n # atomic automata\n elif atom[1]==\"zeroin\":\n a=zeroin(atom[2])\n elif atom[1]==\"sing\":\n a=sing(atom[2])\n elif atom[1]==\"sub\":\n a=sub(atom[2],atom[3])\n elif atom[1]==\"succ\":\n a=succ(atom[2],atom[3])\n elif atom[1]==\"<\":\n a=less(atom[2],atom[3])\n \n else:\n if (not first) or len(atom)!=4:\n raise SyntaxError('Invalid form of input formula near \"{}\".'.format(' '.join(map(str,atom))))\n if isinstance(atom[2], Automaton) or isinstance(atom[3], Automaton):\n raise SyntaxError('Invalid form of input formula near \"{}\".'.format(atom[1]))\n\n # arguments of succ or sub can be in parentheses\n atom.remove('(')\n atom.remove(')')\n atom.reverse()\n for i in range(len(atom)):\n stack.append(atom[len(atom)-i-1])\n atom=[]\n first=False\n continue\n\n if error:\n raise SyntaxError('Invalid form of input formula near \"{}\".'.format(atom[1]))\n stack.append(a)\n first=True\n atom=[]\n\n # reduction\n if rabit:\n a = rabit_reduction(a)\n\n return a",
"def __parse_clauses(self, clauses, where_fields):\r\n self.__clean_list(clauses)\r\n if type(clauses) != list: # This is a token, not an expression \r\n return clauses\r\n elif clauses[0] == QueryTokens.WHERE_CONDITION: # This is an operator expression\r\n return self.__parse_operator(clauses[1:], where_fields)\r\n else: # This is a combination of expressions w/ AND/OR\r\n # ands take precedent over ors, so \r\n # A and B or C and D -> (A and B) or (C and D)\r\n ands = []\r\n ors = []\r\n i = 0\r\n while i < len(clauses):\r\n ands.append(self.__parse_clauses(clauses[i], where_fields))\r\n if i+1 == len(clauses):\r\n ors.append(self.__and_or_single(ands))\r\n else:\r\n if clauses[i+1] == QueryTokens.OR:\r\n ors.append(self.__and_or_single(ands))\r\n ands = []\r\n elif clauses[i+1] == QueryTokens.AND:\r\n pass\r\n i += 2\r\n # TODO: rewrite __and_or_single to handle the ors below just\r\n # like it does the ands above \r\n if len(ors) == 1:\r\n return ors[0]\r\n else:\r\n return operators.Or(ors)",
"def all_conditions(trees, tp=None):\n if not isinstance(trees, list):\n assert isinstance(trees, ast.AST)\n trees = [trees]\n\n visitor = ConditionVisitor()\n for tree in trees:\n visitor.visit(tree)\n conditions = visitor.conditions\n if tp is not None:\n conditions = [c for c in conditions if isinstance(c, tp)]\n\n return conditions",
"def Conjunction(self, paren=False):\n left = self.Equality(paren)\n while self.currtok[1].name == \"AND\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Equality(paren)\n left = BinaryExpr(op, left, right, paren)\n return left",
"def test_pathop7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar | xpb.foobar & xpb.action.source\n exp = '/foo/bar or /foobar and /action/source'\n self.assertEqual(xp.tostring(), exp)",
"def build_compound_conditions(key, compound):\n if isinstance(compound, basestring):\n return [RuleCondition(key, compound)]\n\n invalid_keys = set(compound) - set(['any', 'all', 'not'])\n if invalid_keys:\n raise KeyError(invalid_keys)\n\n # Listify a single string rather than turning each letter into a condition; this is a common user mistake\n # and it's better to second-guess their intent than to treat a string like a list of single-letter searches.\n conditions = []\n\n if 'any' in compound:\n value = [compound['any']] if isinstance(compound['any'], basestring) else compound['any']\n conditions.append(RuleCondition.or_(key, value))\n\n if 'all' in compound:\n value = [compound['all']] if isinstance(compound['all'], basestring) else compound['all']\n conditions.append(RuleCondition.and_(key, value))\n\n if 'not' in compound:\n conditions.extend(rule.negated() for rule in build_compound_conditions(key, compound['not']))\n\n return sorted(conditions)",
"def Or(*conditions):\n def orPred(db):\n from functools import reduce\n return reduce(lambda result, c: result.add(c(db)),\n conditions, Result())\n\n return orPred",
"def apply_rule(seq):\n for idx,prop in enumerate(seq.ant):\n\n if prop.conn == \"not\":\n # create a copy of seq (we don't want to mutate it)\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.con = [ not_a.p1 ] + new_seq.con\n # return a list of 3 values with seq2 being None\n # (since there is not split in this rule)\n return [new_seq , None, \"not left\"]\n\n elif prop.conn == \"or\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\"\n assert b_or_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_or_c.p1)\n new_seq2.ant.append(b_or_c.p2)\n # return the obtained sequents and the rule name\n # here we have two sequents since \"or left\"\n # has two sequents at the top\n return [new_seq1 , new_seq2, \"or left\"]\n\n elif prop.conn == \"and\":\n #create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_and_c = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert b_and_c.conn == \"and\"\n # apply the rule\n new_seq.ant.append(b_and_c.p1)\n new_seq.ant.append(b_and_c.p2)\n # return a list of 3 values with seq2 being None\n return [new_seq, None, 'and left']\n\n \n elif prop.conn == \"imp\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_imp_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_imp_c.conn == \"imp\"\n assert b_imp_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_imp_c.p2)\n new_seq2.con.append(b_imp_c.p1)\n # return the obtained sequents and the rule name\n return [new_seq1 , new_seq2, \"implies left\"]\n\n for idx,prop in enumerate(seq.con):\n if prop.conn == \"not\":\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.ant = [ not_a.p1 ] + new_seq.ant\n # return a list of 3 values with seq2 being None\n return [new_seq , None, \"not right\"]\n elif prop.conn == \"or\":\n # create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\" \n # apply the rule\n new_seq.con.append(b_or_c.p1)\n new_seq.con.append(b_or_c.p2)\n # return the obtained sequent and the rule name\n return [new_seq , None, \"or right\"]\n\n elif prop.conn == 'and':\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n b_and_c = new_seq1.con.pop(idx)\n assert b_and_c.conn == \"and\"\n assert b_and_c == new_seq2.con.pop(idx)\n new_seq1.con.append(b_and_c.p1)\n new_seq2.con.append(b_and_c.p2)\n return [new_seq1 , new_seq2, \"and right\"]\n\n elif prop.conn == 'imp':\n new_seq = Sequent(seq.ant[:], seq.con[:])\n b_imp_c = new_seq.con.pop(idx)\n assert b_imp_c.conn == \"imp\"\n new_seq.ant.append(b_imp_c.p1)\n new_seq.con.append(b_imp_c.p2)\n return [new_seq , None, \"implies right\"]",
"def evalBoolean(tree):\n # check if children the children is a \"or\" or a \"and\" tokken\n if (tree.children[0].data == \"or\"):\n return evalBoolean(tree.children[0].children[0]) or evalBoolean(tree.children[0].children[1])\n if (tree.children[0].data) == \"and\":\n return evalBoolean(tree.children[0].children[0]) and evalBoolean(tree.children[0].children[1])\n \n # set var1\n if(tree.children[0].data == \"integer\"):\n var1 = evalInteger(tree.children[0])\n elif(tree.children[0].data == \"variable\"):\n var1 = getValue(tree.children[0].children[0].value)\n\n # set var2\n if(tree.children[2].data == \"integer\"):\n var2 = evalInteger(tree.children[2])\n elif(tree.children[2].data == \"variable\"):\n var2 = getValue(tree.children[2].children[0].value)\n\n if(tree.children[1].children[0].data == \"greater\"):\n return var1 > var2\n if(tree.children[1].children[0].data == \"less\"):\n return var1 < var2\n if(tree.children[1].children[0].data == \"equals\"):\n return var1 == var2\n if(tree.children[1].children[0].data == \"nequal\"):\n return var1 != var2\n\n print(\"ERROR : UNEXPECTED TOKKEN\")\n return False",
"def relative_filter(tree):\n\n for idx in range(len(tree)):\n if \"SBAR\" in str(tree[idx]).split(\"->\")[0].strip():\n # print tree[idx]\n if \"WHNP\" in str(tree[idx]).split(\"->\")[1].strip().split(\" \"):\n return True\n if \"WHPP\" in str(tree[idx]).split(\"->\")[1].strip().split(\" \"):\n return True\n if \"IN\" in str(tree[idx]).split(\"->\")[1].strip().split(\" \"):\n return True\n if \"WHADVP\" in str(tree[idx]).split(\"->\")[1].strip().split(\" \"):\n return True\n if \"WHADJP\" in str(tree[idx]).split(\"->\")[1].strip().split(\" \"):\n return True\n # print tree\n return False",
"def test_predicate10(self):\n xpb = XPathBuilder()\n pred = xpb.attr('foo').equals('bar').log_or(xpb.foobar)\n xp = xpb.foo.bar.where(pred)\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)",
"def lexer(string): # TODO: refactor\n parsedlist = []\n parsedstring = ''\n leftbcounter = 0\n rightbcounter = 0\n qcounter = 0\n for index, a in enumerate(string):\n if qcounter == 2:\n if a.isalpha():\n qcounter = 1\n else:\n qcounter = 0\n if a == '(':\n leftbcounter += 1\n if a == ')':\n rightbcounter += 1\n if a == \"'\" and leftbcounter == rightbcounter:\n qcounter += 1\n if a != ' ' and leftbcounter == rightbcounter \\\n and qcounter == 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n elif leftbcounter != rightbcounter:\n parsedstring += a\n elif qcounter > 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n else:\n parsedlist.append(parsedstring)\n parsedstring = ''\n if leftbcounter != rightbcounter:\n raise BadRequest()\n bl = []\n sl = []\n counter = 0\n for index, query in enumerate(parsedlist, 1):\n if query == \"and\" or query == \"or\" or query == \"not\":\n if sl:\n bl.append(sl)\n bl.append([query])\n counter = 0\n sl = []\n continue\n sl.append(query)\n counter += 1\n if index == len(parsedlist) and sl:\n bl.append(sl)\n # i later added a third nested list to seperate AND and OR\n query_list = []\n al = []\n counter = 0\n for index, grouped_query in enumerate(bl, 1):\n if grouped_query[0] == \"or\":\n query_list.append(al)\n query_list.append([grouped_query])\n counter = 0\n al = []\n continue\n al.append(grouped_query)\n counter += 1\n if index == len(bl):\n query_list.append(al)\n\n for x in query_list:\n for y in x:\n if y[0] == 'and' or y[0] == 'or' or y[0] == 'not':\n QueryObjects.B.append(y[0])\n continue\n if y[0][0] == '(' and y[0][-1] == ')':\n QueryObjects.B.append(y[0][0])\n lexer(y[0][1:-1])\n QueryObjects.B.append(y[0][-1])\n else:\n QueryObjects.IND += 1\n n = 'arg' + str(QueryObjects.IND)\n QueryObjects.D[n] = query_mapping(y, QueryObjects.IND)[\"query\"]\n QueryObjects.B.append(n)\n return QueryObjects.B",
"def join_conjuncts(conjuncts):\n if (len(conjuncts) == 0):\n return EmptyExpression()\n elif (len(conjuncts) == 1):\n return conjuncts[0]\n return AndExpression(conjuncts[0], join_conjuncts(conjuncts[1:]))",
"def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:\n # 1. wrap each operand with a lambda function\n operands = []\n for operand in node.values:\n o = self.visit(operand)\n if self.is_proposition_factory(o):\n # if the operand is already an temporal requirement factory, keep it\n operands.append(self.visit(o))\n continue\n # if the operand is not an temporal requirement factory, make it an AP\n closure = self._create_atomic_proposition_factory(o)\n operands.append(closure)\n\n # 2. create a function call and pass operands\n boolOpToFunctionName = {\n ast.Or: \"PropositionOr\",\n ast.And: \"PropositionAnd\",\n }\n funcId = boolOpToFunctionName.get(type(node.op))\n newNode = ast.Call(\n func=ast.Name(id=funcId, ctx=ast.Load()),\n # pass a list of operands as the first argument\n args=[ast.copy_location(ast.List(elts=operands, ctx=ast.Load()), node)],\n keywords=[],\n )\n return ast.copy_location(newNode, node)",
"def evaluate(self, payload, level=0, verbose=True):\n\n # if children are joined by AND, evaluate every child until all children\n # are evaluated or until a False breaks the loop (Need all True for AND)\n if self.conjunction_ in ['AND', 'NAND']:\n result = True\n i = 0\n while result and (i < len(self.children_)):\n \n if verbose:\n tabs = \"\\t\" * level\n if i > 0: print(\"\\n\" + tabs + f\"{self.conjunction_} \\n\")\n child_print = 'Composite' if isinstance(self.children_[i], Composite) else 'Evaluation'\n print(tabs + f\"Evaluating Child {i + 1}, Level {level + 1} - {child_print}\")\n \n result = self.children_[i].evaluate(payload, level + 1, verbose=verbose)\n i += 1\n if self.conjunction_ == 'NAND':\n result = not result\n\n\n # if children are joined by OR, evaluate every child until all children\n # are evaluated or until a True breaks the loop (only need 1 True for OR)\n elif self.conjunction_ in ['OR', 'NOR']:\n result = False\n i = 0\n while result == False and (i < len(self.children_)):\n \n if verbose:\n tabs = \"\\t\" * level\n if i > 0: print(\"\\n\" + tabs + f\"{self.conjunction_} \\n\")\n child_print = 'Composite' if isinstance(self.children_[i], Composite) else 'Evaluation'\n print(tabs + f\"Evaluating Child {i + 1}, Level {level + 1} - {child_print}\")\n \n result = self.children_[i].evaluate(payload, level + 1, verbose=verbose)\n i += 1\n if self.conjunction_ == 'NOR':\n result = not result\n\n # XOR evaluation - 1 and only 1 can be True. Have to iterate over all children unless the number of trues becomes greater than 1\n else:\n i = 0\n true_count = 0\n while true_count < 2 and (i < len(self.children_)):\n if verbose:\n tabs = \"\\t\" * level\n if i > 0: print(\"\\n\" + tabs + f\"{self.conjunction_} \\n\")\n child_print = 'Composite' if isinstance(self.children_[i], Composite) else 'Evaluation'\n print(tabs + f\"Evaluating Child {i + 1}, Level {level + 1} - {child_print}\")\n \n # += a boolean is equivalent to += 1 for T and += 0 for False\n true_count += self.children_[i].evaluate(payload, level + 1, verbose=verbose)\n i += 1\n\n if true_count == 1:\n result = True\n else:\n result = False\n\n if verbose: \n tabs = \"\\t\" * level\n print(\"\\n\" + tabs + f\"Composite Result: {result}\")\n\n return result",
"def test_pathop8(self):\n xpb = XPathBuilder()\n xp = (xpb.foo.bar | xpb.foobar).parenthesize() & xpb.action.source\n exp = '(/foo/bar or /foobar) and /action/source'\n self.assertEqual(xp.tostring(), exp)"
]
| [
"0.57285845",
"0.56554633",
"0.56072646",
"0.55281454",
"0.54787534",
"0.54459876",
"0.543675",
"0.5399575",
"0.53608644",
"0.53082603",
"0.524149",
"0.5240573",
"0.519464",
"0.5154944",
"0.5149902",
"0.5137273",
"0.5120655",
"0.511196",
"0.5110548",
"0.51060253",
"0.50971836",
"0.5065744",
"0.5030196",
"0.5021714",
"0.50092083",
"0.49886623",
"0.497712",
"0.49681228",
"0.49652353",
"0.49638933"
]
| 0.78787416 | 0 |
Adds calibration point fields to the import window. | def add_calibration_entry(self):
self.calibration_points=True
self.begin_ind_calibration = tkinter.Label(self.rightmostframe, text='Calibration Point 1 Row', bg='white')
self.begin_ind_calibration.pack(pady=4)
self.begin_ind_calibration_entry = tkinter.Entry(self.rightmostframe)
self.begin_ind_calibration_entry.pack(pady=4)
# end ind calibration (left side)
self.end_ind_calibration = tkinter.Label(self.rightmostframe, text='Calibration Point 2 Row', bg='white')
self.end_ind_calibration.pack(pady=4)
self.end_ind_calibration_entry = tkinter.Entry(self.rightmostframe)
self.end_ind_calibration_entry.pack(pady=4)
# known distance label (left side)
self.known_distance_label = tkinter.Label(self.rightmostframe, text='Known Calibration Distance', bg='white')
self.known_distance_label.pack(pady=4)
self.known_distance_entry = tkinter.Entry(self.rightmostframe)
self.known_distance_entry.pack(pady=4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addpoints(self):\n x=float(self.x1ValueLabel.text()) \n w=float(self.w1ValueEdit.text())\n #x=[1904.5, 1687.22, 3124.3499999999999, 632.57000000000005]\n #w=[4671.2259999999997, 4624.2757000000001, 4916.5100000000002, 4383.9092000000001]\n self.arcdisplay.addpoints(x, w)",
"def get_fields_point(self):\n self.set_definition(sps21point)\n return self.get_fields()",
"def process(self):\n\n\n index = self.dlg.ui.layerCombo.currentIndex() \n if index < 0: \n # it may occur if there's no layer in the combo/legend \n pass\n else: \n layer = self.dlg.ui.layerCombo.itemData(index) \n # layer = QgsVectorLayer(self.fileName, \"layer_name\", \"ogr\")\n \n\n nFeat = layer.featureCount()\n layer.startEditing()\n\n \n\n # Should really put these in a function\n\n index = layer.fieldNameIndex(\"_lts\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_num_lane\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_num_lane\", \\\n QVariant.Int) ] )\n layer.updateFields()\n\n index = layer.fieldNameIndex(\"_protected\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_protected\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_bike_lane\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_bike_lane\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"CROSSINGME\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"CROSSINGME\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts11\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts11\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts12\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts12\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts13\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts13\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts_woX\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts_woX\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"LTS\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"LTS\", \\\n QVariant.Int) ] )\n layer.updateFields()\n\n\n\n i=1\n featid_lts ={}\n for feature in layer.getFeatures():\n street = street_link_object()\n street.path_width = feature['PATHWIDTH']\n street.park_width = feature['PARKWIDTH']\n street.num_lane = feature['NUMLANE']\n 
street.f_code = feature['ROADCLASS']\n street.foc_width = feature['FOC_WIDTH']\n # street.median = feature['MEDIAN']\n street.speed_limit = feature['SPD_LIM']\n # street.pocket_lane = feature['RTLANE']\n street.illegial_parking = feature['ILLPARKING']\n street.center_line = feature['CL']\n street.net_type = feature['NET_TYPE']\n street.right_turn_speed=feature['RTSPEED']\n street.pocket_lane_shift = feature['RTLANSHIFT']\n street.right_turn_lane_length = feature['RTPOCKLENG']\n street.one_way = feature['ONEWAY']\n street.raw_cross_stress = feature['_rawCrossS']\n street.cross_treat = feature['CrossTreat']\n\n street.calculate_crossing_me(street.num_lane) # has to always be before computing lts\n street.compute_LTS()\n if street.LTS != None :\n i+=1\n j=ceil(i/(nFeat/100))\n self.dlg.ui.progress_bar.setValue(j)\n feature[\"_lts_woX\"] = street.LTS\n feature[\"_lts\"] = street.LTS\n feature[\"_lts11\"] = street.lts11\n feature[\"_lts12\"] = street.lts12\n feature[\"_lts13\"] = street.lts13\n feature[\"_num_lane\"] = street.num_lane\n feature[\"_bike_lane\"] = street.bike_lane\n feature[\"_protected\"] = street.protected\n feature[\"CROSSINGME\"] = street.crossing_me\n layer.updateFeature(feature)\n # layer.updateFields()\n # QMessageBox.information(self.dlg, (\"WAIT\"), (\"Please wait!\"))\n layer.commitChanges()\n # layer.commitChanges()\n QMessageBox.information(self.dlg, (\"Successful\"), (\"LTS has been computed!\")) \n\n self.dlg.close()",
"def InitPointInsertion(self, vtkPoints, ):\n ...",
"def loadCalibrationPoints(self):\n\n with open('cali_points.csv', 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar=\"|\")\n i = 0\n\n for row in csvreader:\n j = 0\n for col in row:\n \n if i < 5:\n self.rgb_click_points[i][j] = int(col)\n j += 1\n if j == 2:\n j = 0\n elif i > 4 :\n self.depth_click_points[i-5][j] = int(col)\n j += 1\n if j ==2:\n j = 0\n i+=1\n self.cameraCalibration()\n pass",
"def populateNewFields(nadPoints):\n with arcpy.da.UpdateCursor(nadPoints,\n ['SHAPE@X', 'SHAPE@Y', 'longitude', 'latitude', 'Source'],\n spatial_reference=arcpy.SpatialReference(4326)) as cursor:\n for row in cursor:\n row[2] = row[0]\n row[3] = row[1]\n row[4] = 'Utah AGRC'\n cursor.updateRow(row)",
"def add(self, p):\n\n self.poses.append(CalibrationPoint(p))\n self.selected_point = len(self.poses) - 1\n self.calibration_changed()",
"def run_calib(projector=OPTOMA_HD33()):\n w, h = (0.2160, 0.2794)\n obj_points = np.array([[-w/2, h/2, 0], [w/2, h/2, 0],\n [-w/2, 0, 0], [w/2, 0, 0],\n [-w/2, 0, h/2], [w/2, 0, h/2]])\n\n global img_points, going\n img_points = []\n\n try:\n window = Window()\n window.MoveXY(1600,0)\n window.ShowFullScreen(True)\n going = True\n\n @window.eventx\n def EVT_MOUSE_EVENTS(evt):\n global going, img_points\n if evt.ButtonUp(wx.MOUSE_BTN_LEFT):\n img_points.append(evt.Position)\n print('Picked point %d of 6' % (len(img_points)))\n if len(img_points) == len(obj_points):\n print \"Done\"\n going = False\n\n print(\"\"\"[Extrinsic Calibration] \n\nThere should be 6 points marked on the table and backdrop. \nMoving the mouse over the projected display, click each of the points\nin order:\n (left top, on the backdrop),\n (right top, on the backdrop),\n (left center, on the crease),\n (right center, on the crease),\n (left bottom, on the table),\n (right bottom, on the table)\n\nFollow along with this illustration: http://imgur.com/asfsfd.jpg\n\nClick the six points:\n\"\"\")\n\n while going: cv.WaitKey(10)\n\n finally:\n window.Close()\n\n img_points = np.array(img_points, 'f')\n projector.calibrate_extrinsic(img_points, obj_points)\n\n np.save('%s/config/projector' % (newest_folder), (projector.KK, projector.RT))\n print('OK')",
"def edit(self, p):\n self.poses[self.selected_point].model = p\n self.calibration_changed()",
"def load_cal(self):\n global DEBUG, dtParameterDesc\n if DEBUG:\n print(self.__class__.__name__+'.load_cal(): calibration is loaded')\n if 'refatt' in self.parameters and 'refoutpower' in self.parameters:\n self.parameters['refatt'] = dtParameterDesc['refatt']['default']\n self.parameters['refoutpower'] = dtParameterDesc['refoutpower']['default']",
"def fill_active(self, layout):\n self.pcdi_triggers = QLineEdit()\n layout.addRow(\"pcdi triggers\", self.pcdi_triggers)\n self.pcdi_type = QLineEdit()\n layout.addRow(\"partial coherence algorithm\", self.pcdi_type)\n self.pcdi_iter = QLineEdit()\n layout.addRow(\"pcdi iteration number\", self.pcdi_iter)\n self.pcdi_normalize = QLineEdit()\n layout.addRow(\"normalize\", self.pcdi_normalize)\n self.pcdi_roi = QLineEdit()\n layout.addRow(\"pcdi kernel area\", self.pcdi_roi)",
"def load_default(self):\n\n poses = []\n\n cp = CalibrationPoint(Pose(), Pose())\n cp.measured.position = Point(0.0, 0.0, 0.12)\n cp.measured.orientation.x = -0.025\n cp.measured.orientation.y = 1.\n cp.measured.orientation.z = 0.011\n cp.measured.orientation.w = 0.002\n cp.model = Point(0., 0., 0.)\n poses.append(copy.deepcopy(cp))\n\n cp.measured.position = Point(0.25, 0., 0.12)\n cp.model = Point(0.25, 0., 0.)\n cp.measured.orientation.x = -0.025\n cp.measured.orientation.y = 1.\n cp.measured.orientation.z = 0.011\n cp.measured.orientation.w = 0.002\n poses.append(copy.deepcopy(cp))\n\n cp.measured.position = Point(0.25, 0.14, 0.12)\n cp.model = Point(0.25, 0.14, 0.)\n cp.measured.orientation.x = -0.025\n cp.measured.orientation.y = 1.\n cp.measured.orientation.z = 0.011\n cp.measured.orientation.w = 0.002\n poses.append(copy.deepcopy(cp))\n\n cp.measured.position = Point(0., 0.12, 0.12)\n cp.model = Point(0., 0.12, 0.)\n cp.measured.orientation.x = -0.025\n cp.measured.orientation.y = 1.\n cp.measured.orientation.z = 0.011\n cp.measured.orientation.w = 0.002\n poses.append(copy.deepcopy(cp))\n\n self.poses = poses\n self.set_selected_point(0)",
"def InitPointInsertion(self, vtkPoints, p_int):\n ...",
"def run(self):\n\n # Create the dialog with elements (after translation) and keep reference\n # Only create GUI ONCE in callback, so that it will only load when the plugin is started\n if self.first_start == True:\n self.first_start = False\n self.dlg = CCTVMapperDialog()\n self.dlg.pushButton.clicked.connect(self.select_csv_file)\n \n # Fetch the currently loaded layers\n layers = QgsProject.instance().layerTreeRoot().children()\n # Clear the contents of the comboBox from previous runs\n self.dlg.comboBox.clear()\n # Populate the comboBox with names of all the loaded layers\n self.dlg.comboBox.addItems([layer.name() for layer in layers])\n\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n # pass\n \n filename = self.dlg.lineEdit.text()\n selectedLayerIndex = self.dlg.comboBox.currentIndex()\n self.layer = layers[selectedLayerIndex].layer()\n self.create_mem_layer ()\n # points = []\n new_points = []\n # pt = ''\n with open(filename, 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader)\n for row in csv_reader:\n # print (row)\n # if lin\n point,error = self.get_point_geometry (row[0], float(row[9]), row[8])\n # print (point)\n row.append(error) \n row.append(point)\n new_points.append(row)\n # points.append(point)\n # pipeid = row[0]\n # print (row)\n # print csv_reader\n \n # for point in points:\n # print (point)\n # self.create_point (point)\n \n # print (new_points)\n \n for item in new_points:\n self.create_point (item)",
"def addDataPoints(self):\n pass",
"def fill_import_section():\n section = _SectionData(\"Import\")\n section.props.append((\"ImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_scale)))\n section.props.append((\"PreservePathForExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_preserve_path_for_export))))\n section.props.append((\"ImportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pim_file))))\n section.props.append((\"UseWelding\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_welding))))\n section.props.append((\"WeldingPrecision\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_welding_precision))))\n section.props.append((\"UseNormals\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_normals))))\n section.props.append((\"ImportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pit_file))))\n section.props.append((\"LoadTextures\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_load_textures))))\n section.props.append((\"ImportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pic_file))))\n section.props.append((\"ImportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pip_file))))\n section.props.append((\"ImportPisFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pis_file))))\n section.props.append((\"ConnectedBones\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_connected_bones))))\n section.props.append((\"BoneImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_bone_scale)))\n section.props.append((\"ImportPiaFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pia_file))))\n section.props.append((\"IncludeSubdirsForPia\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_include_subdirs_for_pia))))\n return section",
"def saveCalibrationPoints(self):\n\n if self.kinectCalibrated == True:\n with open('cali_points.csv', 'wb') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n for row in range(5):\n csvwriter.writerow(self.rgb_click_points[row])\n for row in range(5): \n csvwriter.writerow(self.depth_click_points[row])\n pass",
"def wp_insertion() -> \"fire_rs.geodata.display.GeoDataDisplay\":\n\n # Geographic environment (elevation, landcover, wind...)\n wind = (2., 0.)\n area = ((480000.0, 480200.0), (6210000.0, 6210200.0))\n env = PlanningEnvironment(area, wind_speed=wind[0], wind_dir=wind[1],\n planning_elevation_mode='flat', flat_altitude=0)\n\n ignition_points = [\n TimedPoint(area[0][0], area[1][0], 0),\n ]\n logging.info(\"Start of propagation\")\n fire = propagation.propagate_from_points(env, ignition_points, 180 * 60)\n logging.info(\"End of propagation\")\n\n fire1 = fire.ignitions()\n\n gdd = fire_rs.geodata.display.GeoDataDisplay.pyplot_figure(env.raster.combine(fire1),\n )\n gdd.add_extension(TrajectoryDisplayExtension, (None,), {})\n\n # print(env.raster.x_offset)\n # gdd.axis.set_xticks(np.arange(area[0][0]-25, area[0][1], 22.22))\n # gdd.axis.set_yticks(np.arange(area[1][0]-25, area[1][1], 22.22))\n # gdd.axis.grid(True)\n gdd.axes.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n left='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelleft='off', # ticks along the bottom edge are off\n labelbottom='off') # labels along the bottom edge are off\n gdd.axes.set_xlabel(\"\")\n gdd.axes.set_ylabel(\"\")\n t_range_fire = (0, np.inf)\n gdd.draw_ignition_contour(geodata=fire1, time_range=t_range_fire, cmap=matplotlib.cm.plasma)\n gdd.draw_ignition_shade(with_colorbar=False, geodata=fire1, vmin=0, vmax=120*60, cmap=matplotlib.cm.Reds)\n\n gdd.legend()\n return gdd",
"def fill_active(self, layout):\n self.new_func_triggers = QLineEdit()\n layout.addRow(\"new func triggers\", self.new_func_triggers)\n self.new_param = QLineEdit()\n layout.addRow(\"new parameter\", self.new_param)",
"def load_collection(self):\n col = str(self.collectionBox.currentText())\n\n west=None\n east=None\n south=None\n north=None\n crs=None\n\n if self.westEdit.text() != \"\":\n west = float(self.westEdit.text())\n if self.eastEdit.text() != \"\":\n east = float(self.eastEdit.text())\n if self.southEdit.text() != \"\":\n south = float(self.southEdit.text())\n if self.northEdit.text() != \"\":\n north = float(self.northEdit.text())\n if self.crsEdit.text() != \"\":\n crs = int(self.crsEdit.text())\n\n start = self.startDateEdit.date().toPyDate()\n end = self.endDateEdit.date().toPyDate()\n\n arguments = {\n \"id\": col,\n \"temporal_extent\": [str(start), str(end)],\n \"spatial_extent\": {}\n }\n\n if west:\n arguments[\"spatial_extent\"][\"west\"] = west\n if east:\n arguments[\"spatial_extent\"][\"east\"] = east\n if south:\n arguments[\"spatial_extent\"][\"south\"] = south\n if north:\n arguments[\"spatial_extent\"][\"north\"] = north\n if crs:\n arguments[\"spatial_extent\"][\"crs\"] = crs\n\n # info(self.iface, \"Load Collection {}\".format(str(arguments)))\n\n self.processgraph.load_collection(arguments)\n # Refresh process graph in GUI\n self.reload_processgraph_view()",
"def enable_import(self):\n n_t = 0\n if self.tree_ctrl is not None:\n n_t = self.tree_ctrl.GetCount()\n if n_t > 0 and len(self.list_of_perspective) > 0:\n self.bt_import.Enable()\n else:\n self.bt_import.Disable()\n if len(self.list_of_perspective) <= 0 or \\\n self.perspective_cbox.GetValue() in [\"None\",\n \"No Active Application\"]:\n self.perspective_cbox.Disable()\n else:\n self.perspective_cbox.Enable()",
"def add_point(self, point, fill_auto_fields=True, timestamp=None):\n self.points.append(point)\n if fill_auto_fields:\n self.fill_auto_fields(point)\n if timestamp:\n point.timestamp = timestamp",
"def add_datum(self, x, fields):\n\t\n\t\tfor name, value in fields.iteritems():\n\t\t\tif name not in self.curves:\n\t\t\t\tcurve = QwtPlotCurve()\n\t\t\t\tcurve.attach(self)\n\t\t\t\tself.curves[name] = [curve, [], []]\n\t\t\t\n\t\t\tstuff = self.curves[name]\n\t\t\tstuff[1].append(x)\n\t\t\tstuff[2].append(value)",
"def addPoints(self, points):\r\n self.points = points",
"def fill_cbox_analysis(self, plugin):\n self.list_of_perspective = plugin\n if self.parent is None or \\\n not hasattr(self.parent, \"get_current_perspective\") or \\\n len(self.list_of_perspective) == 0:\n return\n if self.parent is not None and self.perspective_cbox is not None:\n for plug in self.list_of_perspective:\n if plug.get_perspective():\n self.perspective_cbox.Append(plug.sub_menu, plug)\n\n curr_pers = self.parent.get_current_perspective()\n if curr_pers:\n self.perspective_cbox.SetStringSelection(curr_pers.sub_menu)\n self.enable_import()",
"def start(self):\n self.start_pre()\n\n #scn = bpy.context.scene\n #bpy.ops.ed.undo_push() # push current state to undo\n\n #self.header_text_set(\"PointsPicker\")\n #self.cursor_modal_set(\"CROSSHAIR\")\n #self.manipulator_hide()\n #self.b_pts = list() # list of 'Point' objects (see /lib/point.py)\n self.b_pts = []\n \n \n self.points_shader = None\n self.points_batch = None \n default_keymap = {\n \"add\": {\"LEFTMOUSE\"},\n \"grab\": {\"LEFTMOUSE\"},\n \"remove\": {\"ALT+LEFTMOUSE\", \"RIGHTMOUSE\"},\n \"commit\": {\"RET\"},\n \"cancel\": {\"ESC\"},\n \"done\": {'ENTER', 'UP_ARROW'}\n }\n\n self.actions = ActionHandler(self.context, default_keymap)\n #self.reload_stylings()\n \n \n self.variable_1 = BoundFloat('''options['variable_1']''', min_value =0.5, max_value = 15.5)\n self.variable_2 = BoundInt('''self.variable_2_gs''', min_value = 0, max_value = 10)\n self.variable_3 = BoundBool('''options['variable_3']''')\n \n self.ui_setup()\n self.ui_setup_post()\n\n self.snap_type = \"SCENE\" #'SCENE' 'OBJECT'\n self.snap_ob = None #bpy.context.object\n \n self.started = False\n \n \n self.selected = -1\n self.hovered = [None, -1]\n\n self.grab_undo_loc = None\n self.grab_undo_no = None\n self.mouse = (None, None)\n\n self.xform = XForm(Matrix.Identity(4))\n\n self.d3_points_render = D3PointsRender(self, render_opts)\n \n self.start_post()\n self.update_ui()",
"def calibration_changed(self):\n\n for marker in self.marker_array.markers:\n marker.action = Marker.DELETE\n\n for idx, pose in enumerate(self.poses):\n marker = Marker()\n marker.id = int(str(int(round(time.time() * 1000)) + idx)[6:])\n marker.header.frame_id = Calibration.marker_workpiece_frame\n marker.type = Marker.SPHERE\n marker.action = Marker.ADD\n marker.scale.x = Calibration.marker_size\n marker.scale.y = Calibration.marker_size\n marker.scale.z = Calibration.marker_size\n marker.color.a = 1.0\n marker.color = ColorRGBA(a=1, r=1, g=0, b=0) if\\\n idx == self.selected_point else ColorRGBA(a=1, r=1, g=1, b=0)\n marker.pose.orientation.w = 1.0\n marker.pose.position = pose.model\n self.marker_array.markers.append(marker)\n\n RosProxy().publish_topic(\n \"calibration_markers\", MarkerArray, self.marker_array, latch=True)\n\n # Keep only the added markers\n self.marker_array.markers = [\n marker for marker in self.marker_array.markers if marker.action == Marker.ADD]\n\n calibration_msg = CalibrationMsg(\n points=self.poses,\n selected_point=-\n 1 if self.selected_point is None else self.selected_point,\n transformation=np.append([], self.transformation_matrix))\n\n RosProxy().publish_topic(\n \"calibration_points\", CalibrationMsg, calibration_msg, latch=True)\n\n RobotControllerHandler().set_transformation(self.transformation_matrix)",
"def draw_design(self, dxfversion=None):\n\n if self.file == None:\n raise Exception(\"No file name given. Use design.file to set name.\")\n \n if dxfversion is None:\n self.drawing = ezdxf.new()\n else:\n self.drawing = ezdxf.new(dxfversion=dxfversion)\n self.msp = self.drawing.modelspace()\n \n for x in self.layers:\n self.drawing.layers.add(self.layers[x]['name'], color=self.layers[x]['color'])\n\n for x in self.features:\n self.add_polyline(self.layers[self.features[x].layer],self.features[x].coord,\n self.features[x].open)\n \n self.drawing.saveas(self.file)",
"def load_object(self, obj):\n\n self.poses = obj.poses\n self.selected_point = obj.selected_point\n self.calibration_changed()",
"def onOpenFileClicked(self):\n fname = QtGui.QFileDialog.getOpenFileName(self, \"Open File\")\n self.calibration.setFile(fname)\n self.filelabel.setText(fname)"
]
| [
"0.5901763",
"0.5490056",
"0.54606605",
"0.53818756",
"0.5338017",
"0.52542025",
"0.52198553",
"0.52113515",
"0.51915157",
"0.5189108",
"0.5149515",
"0.50918996",
"0.50601363",
"0.5030394",
"0.5027425",
"0.49972564",
"0.49930912",
"0.49795052",
"0.4955607",
"0.49454013",
"0.49407855",
"0.49261045",
"0.49071085",
"0.4901107",
"0.49010038",
"0.49004528",
"0.48966944",
"0.4895964",
"0.4868158",
"0.48643252"
]
| 0.64659566 | 0 |
Adds filter field to import window. Allows user to specify another column in the data to filter the data by. | def add_filter_entry(self, filter_column=None, filter_entry=None):
new_filter_label = tkinter.Label(self.rightmostframe, text='Custom Column Filter:')
new_filter_label.pack(pady=4)
my_str = tkinter.StringVar()
new_filter_columns = tkinter.OptionMenu(self.rightmostframe, my_str, *self.columns_list)
if filter_column != None:
my_str.set(filter_column)
new_filter_columns.pack(pady=4)
new_filter_entry = tkinter.Entry(self.rightmostframe)
if filter_entry != None:
new_filter_entry.insert(0, filter_entry)
new_filter_entry.pack(pady=4)
self.filter_entries_list.append((new_filter_entry, my_str)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_data_filter_options(data_url, column_filter, authorization_token):\n # filter_url = data_url\n headers = {'content-type': 'application/json',\n 'Authorization': authorization_token}\n response = requests.get(data_url, headers=headers, verify=False)\n get_json = json.loads(response.content)\n data = get_json\n data_filters = data[column_filter]\n return HttpResponse(data_filters, content_type=\"application/json\")",
"def filter_inbound_statements_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.inbound_statement_grid_div_id, column_name, filter_item_text, self.inbound_statements_grid_filter_position)",
"def add_filter(self, f):\n raise NotImplementedError",
"def _load_filter(self, *args, **kwargs):\n raise NotImplementedError",
"def add_filters_from_module(self, filter_functions):\n\n super(BigqueryInsertFilterMixin, self).add_filters_from_module(filter_functions)\n\n self.bigquery_insert_filter = getattr(\n filter_functions,\n self.bigquery_insert_filter\n )",
"def add_add_filter(source, args, index):\n tagspec = _parse_tagspec(args.get('add-tag%02d' % index))\n header = args.get('add-header%02d' % index)\n value = args.get('add-value%02d' % index, '')\n before = (args.get('add-before%02d' % index) == 'on')\n values = [(hxl.Column.parse(tagspec, header=header), value)]\n return source.add_columns(specs=values, before=before)",
"def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})",
"def filter_table(self):\n\n filter_text = self.dlg.uTextFilter.text()\n self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)\n self.proxy_model.setFilterKeyColumn(2)\n self.proxy_model.setFilterFixedString(filter_text)",
"def filter():\n return get_filter_data(db, MyTable)",
"def addAutoSaveFilter(filter):",
"def _initFilterTable(self):\n\n t = self.tableWidget_filter # shorthand notation\n\n ### Header population & properties\n t.setHorizontalHeaderLabels(self.data.filter_col_name_list)\n t.horizontalHeader().setMovable(True)\n\n ### Item population\n nRows = len(self.data.filter_spec)\n t.setRowCount(nRows)\n for (j, spec) in enumerate(self.data.filter_spec):\n for (i, filter_prop) in enumerate(self.data.filter_property_list):\n if filter_prop is not 'exclude':\n if filter_prop in spec[0]:\n item_string = spec[0][filter_prop]\n else:\n item_string = ''\n t.setItem(j,i,\n Qt.QTableWidgetItem(item_string))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsEnabled) # Make it editable\n else:\n t.setItem(j,i,Qt.QTableWidgetItem(''))\n\n t.item(j,i).setFlags(Qt.Qt.ItemIsSelectable|\n Qt.Qt.ItemIsEditable|\n Qt.Qt.ItemIsDragEnabled|\n Qt.Qt.ItemIsUserCheckable|\n Qt.Qt.ItemIsEnabled) # Make it checkable\n if spec[1]: # exclusion flag\n t.item(j,i).setCheckState(Qt.Qt.Checked)\n else:\n t.item(j,i).setCheckState(Qt.Qt.Unchecked)\n\n\n\n ### Presentation formatting\n t.resizeColumnsToContents()\n for i in range(t.columnCount()):\n if t.columnWidth(i) > self.max_auto_adjust_column_width:\n t.setColumnWidth(i,self.max_auto_adjust_column_width)",
"def _augment_filter(self, header):\n return header",
"def dbtrace_filter_change(filter_name_field):\n\n pass",
"def prepend_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = [filter] + self.filters",
"def filter_charges_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.charges_grid_div_id, column_name, filter_item_text, self.adjustment_folders_column_position)",
"def add_filter(self, filter_: DataGridFilter, index: int = None):\n if index:\n self.filters.insert(index, filter_)\n else:\n self.filters.append(filter_)",
"def filter_resolution_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.resolution_grid_div_id, column_name, filter_item_text)",
"def add_filter(self, filter):\n self._filters.append(filter.as_dict())",
"def add_filter(self, name: str, value: any):\n self.filters[name] = value",
"def on_export_button(self, event):\n wildcard = \"Filtered _iso_res_filt.csv file (*_iso_res_filt.csv)|*_iso_res_filt.csv|\"\\\n \"All files (*.*)|*.*|\"\n defFile = self.datafile[:-4]+'_filt.csv'\n dlg = wx.FileDialog(\n self, message=\"Save file as ...\", \n defaultDir=self.currentDirectory, \n defaultFile=defFile, wildcard=wildcard, style=wx.SAVE\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.recalcAll()\n self.redrawAll()\n self.dataFrame['priorFilter'] = self.dataFrame['allFPass']\n self.dataFrame.to_csv(path, index=False)\n summaryCSVPath = path.split('.')[0] + '_median_[' + ''.join(self.calcNum) + ']_[' + ''.join(self.calcDen) + '].csv'\n self.writeSummaryCSV(summaryCSVPath)\n \n dlg.Destroy()",
"def set_import_filter(self, regex: Union[str, re.Pattern], blacklist: bool = False):\n self._variables['IMPORT_FILTER'] = (\n re.compile(regex) if isinstance(regex, str) else regex,\n bool(blacklist)\n )",
"def add_column_filter(source, args, index):\n include_tags = hxl.TagPattern.parse_list(args.get('cut-include-tags%02d' % index, []))\n exclude_tags = hxl.TagPattern.parse_list(args.get('cut-exclude-tags%02d' % index, []))\n skip_untagged = args.get('cut-skip-untagged%02d' % index, False)\n if include_tags:\n source = source.with_columns(include_tags)\n if exclude_tags or skip_untagged:\n source = source.without_columns(exclude_tags, skip_untagged=skip_untagged)\n return source",
"def add_filter(self, f, **kwargs):\n if not isinstance(f, UnitFilter):\n msg = \"Argument of type Filter expected. Got type {0}\"\n raise TypeError(msg.format(type(f)))\n\n if f.wavelength_unit is None:\n msg = \"Filter wavelength must have units for storage.\"\n raise AttributeError(msg)\n\n append = kwargs.pop('append', True)\n\n f.write_to(\"{0:s}\".format(self.source),\n tablename='/filters/{0}'.format(f.name),\n createparents=True, append=append,\n **kwargs)",
"def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)",
"def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)",
"def row_filter(self, fields):\r\n return RowFieldFilter(self.field_selectors(fields))",
"def filter(self, filter):\n self._filter = filter",
"def filter(self, **kwargs):\n kwargs['query'] += ' FROM {0}'\n return kwargs",
"def add_filter(self, value=''):\n # Create the filter\n filter_ = FilterWithPlaceholder(self, value=value)\n filter_.focus_force()\n filter_.bind('<Return>',\n lambda evt: self.event_generate('<<FiltersReady>>'))\n\n def _on_typing_out_event(evt):\n if filter_.get() == '':\n self._filters.remove(filter_)\n filter_.grid_forget()\n filter_.destroy()\n filter_.bind('<<TypingOut>>', _on_typing_out_event)\n\n # Push the filter in the list\n self._filters = self._filters[:-1] + [filter_] + [self._filters[-1]]\n\n # Refresh the grid\n for (i, curr) in enumerate(self._filters):\n curr.grid(row=0, column=i, sticky='EW')\n curr.lift()\n\n return filter_",
"def add_filter_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='FILTER',\n entry_message=entry_message,\n data=data)"
]
| [
"0.61835283",
"0.576259",
"0.57332873",
"0.57192385",
"0.5686915",
"0.5638789",
"0.55743355",
"0.55560887",
"0.55493474",
"0.5512042",
"0.55096805",
"0.5441423",
"0.54244393",
"0.54233164",
"0.54224974",
"0.5414842",
"0.5397378",
"0.5363333",
"0.5350326",
"0.5346639",
"0.53460985",
"0.53327054",
"0.53149503",
"0.5313054",
"0.52914774",
"0.52898467",
"0.527734",
"0.5270161",
"0.5267623",
"0.5266497"
]
| 0.662433 | 0 |
Retrieves values from filter fields and creates entries in a filter dict | def get_filters(self, saving):
self.filter_entry_dict.clear()
for entry, var in self.filter_entries_list:
if (entry.get() != "") and (var.get() != "") and (not saving):
self.filter_entry_dict[var.get()] = entry.get()
elif saving and var.get() != "":
self.filter_entry_dict[var.get()] = entry.get() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fieldToFilter(fields):\n if len(fields) == 0:\n return dict()\n return dict(\n pdir_keywords=dict(\n query=[\"%s:%s\" % (k, v) for (k, v) in fields.items()],\n operator=\"and\",\n )\n )",
"def filter(self, filter_dict):\n pass",
"def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters",
"def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude",
"def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata",
"def _create_filter_object(form_data: Dict) -> Q:\n filter_object = Q(title__icontains=form_data[\"title\"])\n filter_object &= Q(author__icontains=form_data[\"author\"])\n filter_object &= Q(\n publication_language__icontains=form_data[\"publication_language\"]\n )\n if form_data[\"publication_date_start\"]:\n filter_object &= Q(\n publication_date__gte=form_data[\"publication_date_start\"]\n )\n if form_data[\"publication_date_end\"]:\n filter_object &= Q(publication_date__lte=form_data[\"publication_date_end\"])\n return filter_object",
"def create_filter_query(self, collection_name: str, field: str, filter_type: str, filter_values: Union[List[str], str]=None):\n if filter_type == 'contains':\n # return [{'field' : field, 'filter_type' : 'contains', \"condition\":\"==\", \"condition_value\": filter_values}]\n return [{'field': field, 'filter_type': 'regexp', 'condition': '==', 'condition_value': '.*' + str(filter_values) + '.*'}]\n if filter_type == 'exact_match':\n return [{'field' : field, 'filter_type' : 'exact_match', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'categories':\n return [{'field' : field, 'filter_type' : 'categories', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'exists':\n if filter_values is None or filter_values == '==':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"==\", \"condition_value\":\" \"}]\n elif filter_values == '!=':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"!=\", \"condition_value\":\" \"}]\n if filter_type == '<=' or filter_type == '>=' or filter_type == '>' or filter_type == '<' or filter_type == '==':\n if self.collection_schema(collection_name)[field] == 'date':\n return [{'field' : field, 'filter_type' : 'date', \"condition\":filter_type, \"condition_value\": filter_values}]\n elif self.collection_schema(collection_name)[field] == 'numeric':\n return [{'field' : field, 'filter_type' : 'numeric', \"condition\":filter_type, \"condition_value\":filter_values}]\n else:\n raise ValueError(f\"{filter_type} has not been defined. Please choose one of contains/exact_match/exists/categories/>=/<=/>/<.\")",
"def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters",
"def handle_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params[k] = demisto.getArg(k)\n return params",
"def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters",
"def filter_format(filter_dict, query_data, filter_type, index): \r\n \r\n filter_list = ''\r\n count = 0 \r\n \r\n while query_data[index] != 'PRESENT\\n': \r\n if filter_type in query_data[index]: \r\n count += 1\r\n filter_keyword = query_data[index].strip(filter_type)\r\n filter_list = (filter_keyword.strip('\\n'))\r\n \r\n index += 1 \r\n \r\n if count > 0: \r\n filter_dict[filter_type] = filter_list\r\n return filter_dict",
"def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)",
"def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters",
"def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:",
"def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters",
"def __format_conditional_filters(field: dict) -> dict:\n if \"conditionalFormats\" in field:\n for cf in field[\"conditionalFormats\"]:\n if \"filter\" in cf and isinstance(\n cf[\"filter\"], QueryFilter\n ): # Supports one QueryFilter without list form\n cf[\"filter\"] = encode_conditional_format_filter(cf[\"filter\"])\n\n elif \"filter\" in cf and isinstance(cf[\"filter\"], list): # Supports list of QueryFilters\n filters = []\n for query_filter in cf[\"filter\"]:\n filters.append(encode_conditional_format_filter(query_filter))\n if len(filters) > 2:\n raise Exception(\"Too many QueryFilters given for one conditional format.\")\n cf[\"filter\"] = filters[0] + \"&\" + filters[1] if len(filters) == 2 else filters[0]\n\n return field",
"def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }",
"def filter(self, filters):",
"def apply_search_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params['term'] = k\n params['query'] = demisto.getArg(k)\n break\n return params",
"def create_filter(args: dict) -> dict | None:\n if 'ip' in args:\n args['networkInterfaces.ipv4'] = args.pop('ip')\n expression_list = []\n for arg in args:\n value = args.get(arg)\n if arg == 'riskScore':\n restriction = \"GREATER_THAN_OR_EQUAL_TO\"\n values_list = [arg_to_number(value)]\n else:\n restriction = \"IN\"\n values_list = argToList(value)\n\n values_res = [{\"value\": val} for val in values_list]\n expression = {\n \"propertyName\": arg,\n \"restrictionType\": restriction,\n \"propertyValues\": values_res\n }\n expression_list.append(expression)\n if expression_list:\n return {\"criteria\": {\"criteriaList\": [{\"expressionList\": expression_list}], \"predicateType\": \"AND\"}}\n else:\n return None",
"def _build_query_filters(self, query: dict, filters: list) -> dict:\n\n for filter_tuple in filters:\n if not isinstance(filter_tuple, tuple) or len(filter_tuple) != 3:\n LOG.error(\"polling_filters tuple %s : invalid format or does not contain 3 elements - skipping this filter\", filter_tuple)\n continue\n if isinstance(filter_tuple[2], list) :\n # If \"value\" is a list of values then create a rule (json object) for each \n # value and use \"OR\" condition.\n condition = {'condition': \"OR\",\n 'rules': []}\n for value in filter_tuple[2]:\n rule = {}\n # Prepend fieldname with \"table.\" string\n rule['field'] = f\"table.{filter_tuple[0]}\"\n rule['operator'] = filter_tuple[1]\n rule['value'] = value\n condition['rules'].append(rule)\n query['rules'].append(condition)\n else:\n # Create a single rule for this tuple\n rule = {}\n field_name = f\"table.{filter_tuple[0]}\"\n rule['field'] = field_name\n rule['operator'] = filter_tuple[1]\n rule['value'] = filter_tuple[2]\n query['rules'].append(rule)\n return query",
"def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})",
"def _get_filter_args(self, filter):\n kwargs = {}\n for arg in filter.args[1:]:\n kwargs[arg] = input(f'Type the {arg.replace(\"_\", \" \").title()}: ')\n return kwargs",
"def query_filter(query_params, allow_func=None):\n query_params = query_unflatten(query_params)\n d = {}\n for name, value in query_params.items():\n if allow_func and not allow_func(name, value):\n continue\n else:\n d[name] = value\n return d",
"def get_filters(self):",
"def get_filters(self):\n location_id = self.cleaned_data.get('location_id')\n if (\n location_id\n and user_can_access_location_id(self.domain, self.user, location_id)\n ):\n location_ids = [location_id]\n else:\n location_ids = []\n\n filters = {\n 'location_ids': location_ids,\n 'selected_location_only': self.cleaned_data.get('selected_location_only', False)\n }\n location_status_active = self.cleaned_data.get('location_status_active', None)\n\n if location_status_active is not None:\n filters['is_archived'] = (not location_status_active)\n\n return filters",
"def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version",
"def handle_filters(filters: Optional[List[str]]) -> List[Dict[str, Any]]:\n filters_to_send = []\n for filter_ in filters or []:\n split_filter = filter_.split('=')\n if len(split_filter) != 2 or not split_filter[0] or not split_filter[1]:\n raise DemistoException('Filters should be in the format of \"filtername1=filtervalue1,filtername2=filtervalue2\". '\n f'The filter \"{filter_}\" doesn\\'t meet this requirement.')\n filters_to_send.append({'name': split_filter[0],\n 'operator': '=',\n 'value': split_filter[1]})\n return filters_to_send",
"def get_filter(cls, filter, odata=False):\n\n if filter:\n #www.odata.org/libraries\n if odata:\n lst_filter = []\n if 'and' in filter:\n tmp_filters = filter.split('and')\n else:\n tmp_filters = [filter, ]\n for tmp_filter in tmp_filters:\n if 'eq' in tmp_filter:\n tmp_filter = tmp_filter.replace('eq', '=')\n elif 'gt' in tmp_filter:\n tmp_filter = tmp_filter.raplace('gt', '>')\n elif 'lt' in tmp_filter:\n tmp_filter = tmp_filter.replace('lt', '>')\n lst_filter.append(tmp_filter.split())\n return lst_filter\n else:\n dict_filter = {}\n for lst_attribut in filter.split(','):\n attribut = lst_attribut.split(':')\n if \"/\" in attribut[1]:\n dict_filter[attribut[0]] = attribut[1].split('/')\n else:\n if attribut[1] == 'false':\n dict_filter[attribut[0]] = False\n elif attribut[1] == 'true':\n dict_filter[attribut[0]] = True\n else:\n dict_filter[attribut[0]] = attribut[1]\n return dict_filter\n return False",
"def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters"
]
| [
"0.68263674",
"0.6572805",
"0.64243484",
"0.6419161",
"0.63560766",
"0.6310068",
"0.62887585",
"0.62512815",
"0.6176382",
"0.617537",
"0.6146272",
"0.61409056",
"0.61252195",
"0.6115444",
"0.6105876",
"0.60870826",
"0.607684",
"0.6060113",
"0.60489786",
"0.6046886",
"0.6034719",
"0.60339844",
"0.6025156",
"0.6022942",
"0.5994059",
"0.59588224",
"0.5955839",
"0.59486073",
"0.59440106",
"0.5929925"
]
| 0.65957147 | 1 |
Opens up save window to save the current import | def save_import(self, out_dict):
self.attributes('-topmost', 'false')
options = self.create_options(saving=True)
options['spreadsheet_path'] = self.spreadsheet_path
self.wait_window(SavePage(self, options))
self.attributes('-topmost', 'true') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openSave(self):\n save_dir = QFileDialog.getExistingDirectory(\n self, \"Select a folder\", self.user[\"Save\"], QFileDialog.ShowDirsOnly\n )\n if save_dir != \"\":\n self.user[\"Save\"] = save_dir\n name_split = self.splitPath(save_dir)[-1]\n name = name_split.split(\".\")[0]\n self.ui.l_save.setText(\"Save to: \" + name)\n self.checkFiles()",
"def save(self, filename):\n Application.save(self, self.MNU_SAVE)\n ooldtp.context(self.name)\n\n ldtp.waittillguiexist(self.SAVE_DLG)\n save_dialog = ooldtp.context(self.SAVE_DLG)\n \n save_dlg_txt_filename = save_dialog.getchild(self.SAVE_DLG_TXT_NAME)\n ldtp.wait(2)\n save_dlg_txt_filename.settextvalue(filename)\n\n save_dlg_btn_save = save_dialog.getchild(self.SAVE_DLG_BTN_SAVE)\n \n save_dlg_btn_save.click()\n\n ldtp.waittillguinotexist(self.SAVE_DLG)\n ldtp.wait(1)",
"def save_as(self):\r\n fname = self.get_current_filename()\r\n if fname is not None:\r\n self.emit(SIGNAL('redirect_stdio(bool)'), False)\r\n filename = QFileDialog.getSaveFileName(self,\r\n self.tr(\"Save Python script\"), fname,\r\n self.get_filetype_filters())\r\n self.emit(SIGNAL('redirect_stdio(bool)'), True)\r\n if filename:\r\n filename = osp.normpath(unicode(filename))\r\n editortabwidget = self.get_current_editortabwidget()\r\n index = editortabwidget.currentIndex()\r\n editortabwidget.filenames[index] = filename\r\n else:\r\n return False\r\n self.save(force=True)\r\n # Refresh the explorer widget if it exists:\r\n self.emit(SIGNAL(\"refresh_explorer()\"))",
"def open_editor_save_dialog(self, instance):\n if EVENTS['IS_OBJ']:\n toast('Obj files cannot be modified.')\n\n else:\n if EVENTS['LOADED_FILE']:\n self.run_window.editor.save(EVENTS['FILE_PATH'])\n toast('Content saved on loaded file')\n EVENTS['EDITOR_SAVED'] = True\n else:\n dialog = MDInputDialog(title='Save file: Enter file name',\n hint_text='Enter file name',\n size_hint=(.3, .3),\n text_button_ok='Save',\n text_button_cancel='Cancel',\n events_callback=self.save_asm_file)\n if self.dpi >= 192:\n dialog.pos_hint = {\n 'x': dp(0.18),\n 'y': dp(0.18)\n }\n toast('Save Editor Content')\n dialog.open()",
"def save(self, export_path: str):",
"def save_file(self):\n save_dialog = SaveToNWBDialog(parent=self)\n if save_dialog.value:\n print('Copying content to new nwb file, please wait...')\n nwb_copy_file(\n old_file=self.model.nwb,\n new_file=save_dialog.newfile,\n cp_objs=save_dialog.cp_objs,\n save_to_file=True\n )\n print('File successfully copied!')",
"def askSave(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OVERWRITE_PROMPT):\r\n return askOpen(parent,title,defaultDir,defaultFile,wildcard,wx.SAVE|style )",
"def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)",
"def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)",
"def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)",
"def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)",
"def save(self, event=None):\n filename = self.view.file.get()\n self.model.save(filename)\n self.view.file.set(\"Saved.\")",
"def saveInConfigFileDlg( self ):\n pass",
"def save():\n click.echo(\"Not implemented yet. In the future, this command will be used for saving.\")\n sys.exit(-2)",
"def file_menu_save_activate(self, widget, data=None):\n self.communicator.save_project()",
"def onSaveDialog(self, event):\r\n btn = event.GetEventObject()\r\n\r\n ctrls = self.__other_ctrls\r\n\r\n # check the trigger object\r\n # elif event.GetID() == XRCID('op_restart_dialog'):\r\n # wildcard = 'Amber restart file (*.rst)|*.rst'\r\n # ctrls = self.__ctrls['op_restart_file']\r\n if btn == ctrls['log_file_dialog']:\r\n wildcard = 'Amber log file (*.log)|*.log'\r\n ctrls = self.__ctrls['log_file']\r\n\r\n elif btn == ctrls['op_restart_dialog']:\r\n wildcard = 'Amber restart file (*.rst)|*.rst'\r\n ctrls = self.__ctrls['op_restart_file']\r\n\r\n elif btn == ctrls['crds_file_dialog']:\r\n wildcard = 'Amber trajectory file (*.mdcrd.gz)|*.mdcrd.gz'\r\n ctrls = self.__ctrls['crds_file']\r\n\r\n elif btn == ctrls['vels_file_dialog']:\r\n wildcard = 'Amber velocity file (*.mdvel.gz)|*.mdvel.gz'\r\n ctrls = self.__ctrls['vels_file']\r\n\r\n elif btn == ctrls['enes_file_dialog']:\r\n wildcard = 'Amber energy file (*.ene)|*.ene'\r\n ctrls = self.__ctrls['enes_file']\r\n\r\n else:\r\n raise 'aaaaa'\r\n\r\n dlg = wx.FileDialog(\r\n self, message=\"Save file as ...\", defaultDir=os.getcwd(), \r\n defaultFile=\"\", wildcard=wildcard, style=wx.SAVE\r\n )\r\n # default filter\r\n # dlg.SetFilterIndex(2)\r\n # show the dialog\r\n if dlg.ShowModal() == wx.ID_OK:\r\n path = dlg.GetPath()\r\n # show the dialog\r\n ctrls.SetValue(path)\r\n dlg.Destroy()",
"def onExport(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportAssignments(path)\n dlg.Destroy()",
"def onSave(self):\r\n # productive #onButton\r\n profprint()\r\n\r\n self.dirDialog = qt.QFileDialog(self.parent)\r\n self.dirDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config\"))\r\n self.dirDialog.options = self.dirDialog.DontUseNativeDialog\r\n self.dirDialog.acceptMode = self.dirDialog.AcceptSave\r\n self.dirDialog.defaultSuffix = \"cfg\"\r\n self.dirDialog.setNameFilter(\"Configuration file (*.cfg)\")\r\n self.dirDialog.connect(\"fileSelected(QString)\", self.saveFileSelected)\r\n self.dirDialog.show()",
"def onSave(self, event):\n\n saveDialog = wx.FileDialog(self, style=wx.SAVE|wx.OVERWRITE_PROMPT,\n wildcard=\"HTML (*.html)|*.html|\"\n \"Text files (*.txt)|*.txt\")\n if (saveDialog.ShowModal() == wx.ID_OK):\n file(saveDialog.GetPath(), 'w').write(\n XRCCTRL(self, \"TXT_RDF\").GetValue())",
"def onSave(self):\n #productive #onButton\n profprint()\n \n self.fileDialog = qt.QFileDialog(self.parent)\n self.fileDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config\"))\n self.fileDialog.options = self.fileDialog.DontUseNativeDialog\n self.fileDialog.acceptMode = self.fileDialog.AcceptSave\n self.fileDialog.defaultSuffix = \"cfg\"\n self.fileDialog.setNameFilter(\"Configuration file (*.cfg)\")\n self.fileDialog.connect(\"fileSelected(QString)\", self.saveFileSelected)\n self.fileDialog.show()",
"def OnSaveAsFileRibbon(self, event):\n# def onSaveAsFile(self, event):\n wildcard = \"Text source (*.txt)|*.txt|\" \\\n \"All files (*.*)|*.*\"\n \n self.currentDirectory = os.getcwd()\n \n dlg = wx.FileDialog(self, message=\"Сохранение документа\", defaultDir=self.currentDirectory, \n defaultFile=\"\", wildcard=wildcard, style=wx.FD_SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n report = open(path, \"w\") \n\n report.close()\n\n dlg.Destroy()",
"def filemenu_Export(self):\n line_dict = {}\n for line in self.lines.values():\n for name, arr in line.to_mat().items():\n line_dict[name] = arr\n fileTypes = [(\"MATLAB file\",\"*.mat\"), (\"NumPy file\",\"*.npz\")]\n options = {}\n options['initialdir'] = os.path.expanduser('~')\n options['filetypes'] = fileTypes\n options['parent'] = self.master\n filename = filedialog.asksaveasfilename(**options)\n if filename:\n _, ext = os.path.splitext(filename)\n if ext == \".mat\":\n sio.savemat(filename, line_dict)\n elif ext == \".npz\":\n np.savez(filename, lines=line_dict)",
"def handleActionSave(self):\n for w in self.filesList.selectedItems():\n self.filesList.saveFile(w.text(2))",
"def OnSaveAs(self, event):\r\n dirname = ''\r\n d = wx.FileDialog(self, \"Save File\", dirname, \"\", \"*.panda\", wx.SAVE)\r\n if d.ShowModal() == wx.ID_OK:\r\n self.filename = os.path.join(d.GetDirectory(), d.GetFilename())\r\n self.core.Save(self.filename)\r\n# TODO check for and strip off .panda?\r\n d.Destroy()\r\n\r\n self.RefreshTitleBar()",
"def OnSave(self, e):\n if (not self.mainparent.file_loaded):\n msg = \"An input file must be loaded/built before it can be written\"\n ShowMessage(msg, kind='warn')\n return\n self.mainparent.statusbar.SetStatusText(\"Select a File ...\", 0)\n\n dirname = os.getcwd()\n dlg = wx.FileDialog(self, \"Save File\", dirname, \"\", \"*\", wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n\n if (dlg.ShowModal() != wx.ID_OK):\n dlg.Destroy()\n self.mainparent.reset_statusbar()\n return\n\n full_path = str(dlg.GetPath()) # get selected filename and convert to standard string\n\n # set overwrite to True since the above FileDialog already asked\n self.mainparent.input_file.write(output=full_path, indent=defaults.indent, overwrite=True)\n self.mainparent.statusbar.SetStatusText(\"Written to: {}\".format(full_path), 0)\n\n self.mainparent.input_file.filename = full_path\n self.mainparent.statusbar.SetStatusText(\"File: {}\".format(full_path), 2)",
"def saveAs(self):\n print('running saveAs')\n fileName = filedialog.asksaveasfilename(\n defaultextension='.gmcr',\n filetypes=((\"GMCR+ Save Files\", \"*.gmcr\"), (\"All files\", \"*.*\")),\n parent=self.root\n )\n if fileName:\n self.file = fileName\n self.root.wm_title('GMCR+ v{} | {}'.format(__version__, self.file))\n self.saveConflict()",
"def saveFileAs(self, textEntry):\n textEntry.setText(QtGui.QFileDialog.getSaveFileName(self, 'Export'))",
"def on_press_save(self):\n\n if self.dbChk.isChecked():\n self.processed_fields['db'] = self.dbPath.text()\n self.dbForm = DBFormWindow(self.processed_fields, self)\n self.dbForm.show()\n\n if self.savePathChk.isChecked():\n if self.savePath.text():\n shutil.copy(self.tempFile[1], os.path.join(self.savePath.text(), self.saveName.text()+'.wav'))\n os.close(self.tempFile[0])\n os.remove(self.tempFile[1])\n\n QMessageBox.information(self, 'Saved', f'Saved to: {os.path.join(self.savePath.text(), self.saveName.text()+\".wav\")}')\n\n self.saveBtn.setEnabled(False)\n self.deleteBtn.setEnabled(False)\n self.recordBtn.setEnabled(True)\n self.recordBtn.setIcon(QIcon(r'.\\assets\\record.png'))\n self.recordShortCut.setEnabled(True)\n self.inputDropDown.setEnabled(True)\n self.canvasStack.setCurrentWidget(self.microphoneCanvas)\n self.state = 'Waiting'",
"def save_fileDialog(self):\n\n if self.check_data():\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getSaveFileName(self, \"Сохранить как\", os.path.expanduser(\"~\"), \"Все файлы (*);;XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format =='xml':\n self.create_gen_xml(fileName)\n elif file_format =='json':\n self.create_gen_json(fileName)\n self.msg2Statusbar.emit('Сохранено в файл: {0}'.format(fileName))",
"def on_save(self, *args):\n file = self.get_filename()\n f = open(file.path, \"w\")\n content = self.SwitchesChooser.get_cmd_line()\n content = re.sub(\" +\", \"\\n\", content)\n f.write(content)\n f.close()\n if self.open_file_after_exit_check.get_active():\n GPS.EditorBuffer.get(file)\n self.response(Gtk.ResponseType.APPLY)"
]
| [
"0.7050034",
"0.68424726",
"0.6820891",
"0.6648275",
"0.66444814",
"0.6643438",
"0.66269696",
"0.6616628",
"0.6616628",
"0.6616628",
"0.6616628",
"0.65899795",
"0.65818965",
"0.65569484",
"0.6535644",
"0.652975",
"0.65023637",
"0.64961636",
"0.64725995",
"0.64671546",
"0.6463132",
"0.6445232",
"0.64376026",
"0.64174867",
"0.6406019",
"0.6394308",
"0.63830894",
"0.63650113",
"0.6360318",
"0.63317245"
]
| 0.68973863 | 1 |
Handles saving of the import with the specified name Includes error checking for invalid characters, empty name, and file already existing | def save(self, file_name):
invalid_characters = ['#','%','&','{','}','\\','<','>','*','?','/','^','$','!','\'','\"',':','@','+',"`",'|','=','~']
if len(file_name) == 0:
message = "The import name cannot be empty"
SaveError(self, message)
elif any(invalid_char in file_name for invalid_char in invalid_characters):
used_invalid_chars = [invalid_char for invalid_char in invalid_characters if invalid_char in file_name]
display_text = ",".join(used_invalid_chars)
message = "The import name cannot contain the character(s) \n " + display_text
SaveError(self, message)
else:
if(self.file_exists(file_name)):
SaveOverwrite(self, file_name)
else:
self.create_json_file(file_name)
self.destroy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_valid_file_name(self):\n Base.save_to_file([self.r0, self.s1])\n self.assertTrue(path.exists('Base.json'))",
"def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)",
"def saveas(self, name):\n self.filename = name\n self.save()",
"def on_save(self):\n checkFN = True\n filePath = str(self.lLibPathVal.text())\n fileName = str(self.leFileName.text())\n #-- Check FileName --#\n exclude = [' ', '/', '\\\\', '.']\n if fileName == '' or fileName == ' ' or fileName.startswith('_'):\n mess = \"!!! ERROR: FileName can not be empty !!!\"\n self.mainUi._defaultErrorDialog(mess, self)\n else:\n for iter in exclude:\n if iter in fileName:\n checkFN = False\n if not checkFN:\n mess = \"!!! ERROR: FileName is not valid !!!\"\n self.mainUi._defaultErrorDialog(mess, self)\n else:\n #-- Check FilePath --#\n if not (filePath.endswith('script') or not filePath.endswith('node')\n or not filePath.endswith('branch')):\n mess = \"!!! ERROR: FilePath is not valid !!!\"\n self.mainUi._defaultErrorDialog(mess, self)\n else:\n absPath = os.path.join(filePath, \"%s.py\" % fileName)\n itemType = filePath.split(os.sep)[-1]\n if os.path.exists(absPath):\n mess = \"%s already exists ! Overwrite ?\" % fileName\n cmds = [partial(self.writeLibFile, absPath, itemType, True)]\n self.confirmSave = pQt.ConfirmDialog(mess, ['Ok'], cmds)\n self.confirmSave.exec_()\n else:\n self.writeLibFile(absPath, itemType)",
"def save(forcedFileName=\"\"):\n if (forcedFileName == \"\"):\n pass #TODO\n else:\n pass #TODO",
"def save(self, *args, **kwargs):\n empty_std_name = False\n if not self.standard_name or self.standard_name.isspace():\n empty_std_name = True\n\n empty_sys_name = False\n if not self.systematic_name or self.systematic_name.isspace():\n empty_sys_name = True\n\n if empty_std_name and empty_sys_name:\n raise ValueError(\n \"Both standard_name and systematic_name are empty\")\n\n super(Gene, self).save(*args, **kwargs) # Call the \"real\" save().",
"def save(self, fname):\n pass",
"def save_to_file(self, file_name):\n\n try:\n self.__model.save(file_name)\n return True\n except OSError:\n return False",
"def save(self, filename):\n pass",
"def persist(self, file_name, model_dir):\n pass",
"def persist(self, file_name, model_dir):\n\n pass",
"def OnSaveAs(self, event):\r\n dirname = ''\r\n d = wx.FileDialog(self, \"Save File\", dirname, \"\", \"*.panda\", wx.SAVE)\r\n if d.ShowModal() == wx.ID_OK:\r\n self.filename = os.path.join(d.GetDirectory(), d.GetFilename())\r\n self.core.Save(self.filename)\r\n# TODO check for and strip off .panda?\r\n d.Destroy()\r\n\r\n self.RefreshTitleBar()",
"def save(self, filename):\n pass",
"def save(self, handler, name):",
"def store(self, filename):",
"def callback_Save():\n saving_msg = 'Save Bioprocess As:'\\\n '\\n(will save in processes/ by default)'\n fileName = sg.popup_get_text(saving_msg, 'File Saver')\n\n if fileName:\n # read filename and add default path\n fileName = fileName.strip(' ')\n\n # if user does not input a fileName\n elif fileName is None:\n fileName = 'cancel'\n elif fileName == '':\n fileName = 'exit'\n\n return fileName",
"def save(self, filename):\n raise NotImplementedError",
"def save_inp_file(name: str = f'/tmp/{datetime.datetime.now()}'):\n epamodule.ENsaveinpfile(name) # THIS IS OPTIONAL\n logger.debug(name)",
"def save(self, fname = None):\n return True",
"def save_model(model, model_name):\r\n if os.path.isfile(model_name):\r\n print('Error: File already exists - please change name or remove conflicting file')\r\n else:\r\n joblib.dump(model, model_name)",
"def save_fileDialog(self):\n\n if self.check_data():\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getSaveFileName(self, \"Сохранить как\", os.path.expanduser(\"~\"), \"Все файлы (*);;XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format =='xml':\n self.create_gen_xml(fileName)\n elif file_format =='json':\n self.create_gen_json(fileName)\n self.msg2Statusbar.emit('Сохранено в файл: {0}'.format(fileName))",
"def OnSave(self, event):\r\n if self.filename == None:\r\n self.OnSaveAs(event)\r\n else:\r\n self.core.Save(self.filename)",
"def setSaveName(self,name):\n self.saveName = name",
"async def savename(self, ctx, *, iracing_name):\n if is_support_guild(ctx.guild.id):\n await ctx.send('Sorry, this discord does not allow update, saveid, savename, '\n 'leaderboard, and series commands so as not to overload me. '\n 'Try `!careerstats` or `!yearlystats` with your customer ID to test '\n 'or go to #invite-link to bring the bot to your discord for all functionality')\n return\n await self.save_name.call(ctx, iracing_name)",
"def test_existing_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.reddit_id == 't3_ahal9v').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertTrue(file.endswith(' - 2'), msg='Failed to increment duplicate post!')",
"def upload_formatted_file(filename, obsid, pulsar, bins, cal_id, filetype, name_info=\"\", extension=\"\"):\n all_ftypes = std.get_filetypes_from_db(obsid, pulsar, filetype)\n fname_pref = std.filename_prefix(obsid, pulsar, bins=bins, cal=cal_id)\n upname = \"{}\".format(fname_pref)\n upname += name_info\n upname += extension\n\n metadata = get_common_obs_metadata(obsid)\n subbands = std.get_subbands(metadata)\n\n if os.path.basename(upname) not in all_ftypes:\n logger.info(\"Archive file not on databse. Uploading...\")\n shutil.copy(filename, upname)\n std.upload_file_to_db(obsid, pulsar, upname, filetype, metadata=metadata, coh=True)\n os.remove(upname)\n else:\n logger.info(\"file on database. Not uploading\")",
"def OnSave(self, e):\n\t\tconvert_to = None\n\t\tif e.Id == 201:\n\t\t\tconvert_to = \"photoabsorption\"\n\t\telif e.Id == 202:\n\t\t\tconvert_to = \"refractive_index\"\n\t\tlogger.info(\"Save\")\n\t\tfd = wx.FileDialog(self, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n\t\tif fd.ShowModal()==wx.ID_OK:\n\t\t\tmetadata = {\"Density\": float(self.DensityText.GetValue()), \"Molecular Formula\":self.StoichiometryText.GetValue(),\"Formula Mass\":data.calculate_FormulaMass(self.Stoichiometry)}\n\t\t\tdata.export_data(fd.GetPath(), numpy.transpose(numpy.vstack((self.Full_E,self.KK_Real_Spectrum,data.coeffs_to_ASF(self.Full_E,self.Imaginary_Spectrum)))), header_info=metadata, convert_to=convert_to)",
"def save_file(self, username, filename) -> bool:\r\n file_id = self.get_file_id(username, filename)\r\n file_path = self.users_dir / username / filename\r\n return (file_id in self.patch_history) and self.try_save_file(\r\n file_path, self.patch_history[file_id])",
"def test_osimportname_name_proper_chars(self):\n\n # get foreign key object id\n os_id = Os.objects.get(os_name='os_1').os_id\n # get object\n form = OsimportnameForm(data = {\n 'osimportname_name': 'oooooooooooooooooooooooooooooo',\n 'osimportname_importer': 'osimportname_importer_1',\n 'os': os_id,\n })\n # compare\n self.assertTrue(form.is_valid())",
"def saveFileAs(self, textEntry):\n textEntry.setText(QtGui.QFileDialog.getSaveFileName(self, 'Export'))"
]
| [
"0.63546175",
"0.61734945",
"0.61724585",
"0.6132157",
"0.5944297",
"0.5903181",
"0.58933043",
"0.5844493",
"0.57878095",
"0.57557",
"0.57276547",
"0.5724395",
"0.5696154",
"0.56941295",
"0.56651986",
"0.5626924",
"0.56145585",
"0.5611102",
"0.559869",
"0.55925167",
"0.55807865",
"0.55546767",
"0.55512255",
"0.55430144",
"0.5518283",
"0.5495777",
"0.5474906",
"0.54648036",
"0.5460221",
"0.5448315"
]
| 0.8128952 | 0 |
Set or show the User Role. Members with this role can create polls and manage their own polls. | async def userrole(self, ctx, *, role=None):
server = ctx.message.guild
if not role:
result = await self.bot.db.config.find_one({'_id': str(server.id)})
if result and result.get('user_role'):
await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \n'
f'The current user role is `{result.get("user_role")}`. '
f'To change it type `{result.get("prefix")}userrole <role name>`')
else:
await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \n'
f'No user role set. '
f'To set one type `{result.get("prefix")}userrole <role name>`')
elif role in [r.name for r in server.roles]:
await self.bot.db.config.update_one({'_id': str(server.id)}, {'$set': {'user_role': str(role)}}, upsert=True)
await ctx.send(f'Server role `{role}` can now create and manage their own polls.')
else:
await ctx.send(f'Server role `{role}` not found.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()",
"def __setRole(self, session):\r\n self.__role = session.role\r\n if self._config.has_key('purpose'):\r\n co_role = ccm.get_role_for_purpose(session, self._config['purpose'])\r\n _logger.info(\"Switching user to role: %s\" % co_role)\r\n session.role = co_role\r\n _logger.info(\"Switched user to role: %s\" % session.role)",
"def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")",
"def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")",
"def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")",
"def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()",
"def set_role(self, user, role):\n obj = self._get_through_object(user)\n obj.role = role if isinstance(role, int) else obj.ROLE_MAP_REV[role]\n obj.save()",
"def get_role(self):\n return self.role",
"def role(self, role):\n\n self._role = int(role)",
"def role(self) -> str:\n return pulumi.get(self, \"role\")",
"def role(self, role):\n\n self._role = role",
"def role(self, role):\n\n self._role = role",
"def set_rights(self, user, role):\n for_user = User.get_user_by_username(user)\n role = UserRole(role)\n self.access_handler.check_set_rights(for_user, role)\n for_user.set_user_type(role)",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n\n return self._role",
"def set_role(username, role_name=\"\"):\n\tsession = get_session()\n\tdata = {\"username\": username, \"role\": role_name}\n\tsession.post(\"{url}/api/users/set_role\".format(url=get_registry_url()), json=data)",
"def set_role(userid, role, group, request=None):",
"def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")",
"def show_user_role(instance_id):\n client = get_client()\n\n return client.get_current_user_role(instance_id=instance_id,\n custom_headers=get_custom_headers())",
"def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]",
"def role(self, roleSpec):\n self.sparkProperties[SparkProperties.SPARK_MESOS_ROLE] = roleSpec\n return self",
"def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')",
"def change_user_role(username, new_role):\n user_connector.change_user_role(username, new_role)",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"def set_user_role(request):\n id_user = request.POST.get('user_id')\n role = request.POST.get('role')\n id_projet = request.POST.get('project_id')\n # retrieves the user whose role needs to be changed\n user_to_modify = User.objects.get(pk=id_user)\n # check if user can attribute role for the project\n project = UtilsData.get_object_by_type_and_id('project', id_projet)\n if request.user.can_affect(project):\n # Verifies if the user whose role is to be changed is the administrator\n if user_to_modify.is_superuser:\n return HttpResponse(json.dumps(\"error you can't remove admin role\"),\n content_type=\"application/json\")\n else:\n # change role\n project.setRole(user_to_modify, role)\n return HttpResponse(json.dumps(\"ok\"),\n content_type=\"application/json\")"
]
| [
"0.71884406",
"0.6784374",
"0.676158",
"0.66587776",
"0.66587776",
"0.66521305",
"0.64882016",
"0.6451127",
"0.64366496",
"0.64330333",
"0.633826",
"0.633826",
"0.6334347",
"0.6305694",
"0.6305694",
"0.6305694",
"0.62603354",
"0.62150025",
"0.62140286",
"0.6164558",
"0.602059",
"0.60118973",
"0.5997346",
"0.59893966",
"0.5957245",
"0.59518355",
"0.59518355",
"0.59518355",
"0.5931484",
"0.5911533"
]
| 0.7142816 | 1 |
Fuse this track estimates with the current estimates from argument track. | def fuse_estimates(self, tracks):
# compile states and covariances
mu = []
sigma = []
s, t = self.state_estimator.state(get_fused=False)
p = self.state_estimator.P
mu.append(s[0])
sigma.append(p[-1])
for track in tracks:
s, t = track.state_estimator.state(get_fused=False)
mu.append(s[0])
sigma.append(track.state_estimator.P[-1])
fused_mu, fused_sigma = self.fusion_method(mu, sigma)
self.state_estimator.x_k_fused.append(fused_mu) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def estimate(self, estimate):\n\n self._estimate = estimate",
"def assign_estimate(self, estimate):\n return self.update(estimate=estimate)",
"def update(self, kf, detection):\n self.mean, self.covariance = kf.update(\n self.mean, self.covariance, detection.to_xyah())\n self.features.append(detection.feature)\n\n self.hits += 1\n self.time_since_update = 0\n if self.state == TrackState.Tentative and self.hits >= self._n_init:\n self.state = TrackState.Confirmed",
"def estimate(self) -> None:\n pass",
"def update(self):\n \n for track in self.tracks:\n track.update()",
"def update(self, kf, detection, detection_noise):\n self.mean, self.covariance = kf.update(\n self.mean, self.covariance, detection, detection_noise)\n\n self.hits += 1\n self.time_since_update = 0\n if self.state == TrackState.Tentative and self.hits >= self._n_init:\n self.state = TrackState.Confirmed",
"def update(self, detections):\n # Run matching cascade.\n matches, unmatched_tracks, unmatched_detections = \\\n self._match(detections)\n\n # Update track set.\n if len(matches) != 0:\n matched_track_means = []\n matched_track_covs = []\n matched_measures = []\n\n for track_idx, detection_idx in matches:\n track = self.tracks[track_idx]\n detection = detections[detection_idx]\n matched_track_means.append(track.mean)\n matched_track_covs.append(track.covariance)\n matched_measures.append(detection.tlwh)\n\n matched_track_means = torch.cat(matched_track_means, dim=0)\n matched_track_covs = torch.cat(matched_track_covs, dim=0)\n matched_measures = torch.stack(matched_measures, dim=0)\n\n matched_measures[:, :2] += matched_measures[:, 2:] / 2\n matched_measures[:, 2] /= matched_measures[:, 3]\n\n # Make the most of the GPU\n updated_means, updated_covs = self.kf.update(matched_track_means, matched_track_covs, matched_measures)\n for i, (track_idx, detection_idx) in enumerate(matches):\n track = self.tracks[track_idx]\n detection = detections[detection_idx]\n track.update(updated_means[i].unsqueeze(0),\n updated_covs[i].unsqueeze(0),\n detection.feature,\n detection.payload)\n\n for track_idx in unmatched_tracks:\n self.tracks[track_idx].mark_missed()\n for detection_idx in unmatched_detections:\n self._initiate_track(detections[detection_idx])\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\n\n # Update distance metric.\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\n features, targets = [], []\n for track in self.tracks:\n if not track.is_confirmed():\n continue\n features += track.features\n targets += [track.track_id for _ in track.features]\n track.features = []\n self.metric.partial_fit(\n features,\n targets,\n active_targets)",
"def tracker(self, tracker):\n self._tracker = tracker",
"def run_analysis(self):\n\n self._apply_loads_to_framat_model()\n\n # ----- Run the FramAT analysis -----\n results = standard_run(args=StdRunArgs(filename=self.own_files['model_file'], verbose=True))\n self.last_solution = results\n\n # ----- Share loads -----\n logger.info(\"Sharing loads...\")\n frame = results['frame']\n self.shared.structure.def_fields = frame.deformation.get_displacement_fields(frame, n_sup=1000)",
"def _track(self, phases=None, name=\"Main\", y0_dict=None):\n sim_df = self.simulate(phases=phases, name=name, y0_dict=y0_dict, show_figure=False)\n param_df = self._track_param(name=name)\n return pd.merge(\n sim_df, param_df, how=\"inner\", left_on=self.DATE, right_index=True, sort=True)",
"def apply(self, xy0, xy1):\n x0, y0 = xy0\n x1, y1 = xy1\n data_size = self._used[y0][x0]\n assert(self._used[y1][x1] + data_size <= self._size[y1][x1])\n self.history.append((xy0, xy1, data_size))\n self._used[y1][x1] += data_size\n self._used[y0][x0] = 0\n if self.goal == xy0:\n self.goal = xy1",
"def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()",
"def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()",
"def _update_targets(self):\n for ga_main, ga_targ in zip(self.ga.variables, self.ga_.variables):\n ga_targ.assign(self._polyak * ga_targ + (1 - self._polyak) * ga_main)\n if self.use_lyapunov:\n for lc_main, lc_targ in zip(self.lc.variables, self.lc_.variables):\n lc_targ.assign(self._polyak * lc_targ + (1 - self._polyak) * lc_main)\n else:\n for q_1_main, q_1_targ in zip(self.q_1.variables, self.q_1_.variables):\n q_1_targ.assign(self._polyak * q_1_targ + (1 - self._polyak) * q_1_main)\n for q_2_main, q_2_targ in zip(self.q_2.variables, self.q_2_.variables):\n q_2_targ.assign(self._polyak * q_2_targ + (1 - self._polyak) * q_2_main)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)",
"def update_fits( self ):\n\n\t\tself._submit_to_queue( None )\n\t\tqueue_contents = self._retrieve_from_queue()\n\n\t\tfor (title,dQ) in queue_contents:\n\t\t\tE = self.get_experiment_by_title(title)\n\t\t\tE.dQ_fit = dQ\n\n\t\treturn",
"def apply(self, store):\n self.update.apply(store, self)",
"def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)",
"def rebuild(self):\n self.from_samples(self.samples)",
"def test_two_transforms_track_with_one_already_applied(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(2,\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 2)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)",
"def update_with_fit_args(self, **kwargs):\n pass",
"def estimate_history(self, phase, name=\"Main\", **kwargs):\n estimator = self.phase_estimator(phase=phase, name=name)\n estimator.history(**kwargs)",
"def track_smooth(self, filtered_track):\n\n estimates = self._get_estimates(filtered_track)\n\n penultimate_index = len(filtered_track) - 2\n\n smoothed_track = copy.deepcopy(filtered_track)\n\n # Iterating backwards from the penultimate state, to the first state.\n for t in range(penultimate_index, -1, -1):\n smoothed_track[t] = self.smooth(filtered_track[t],\n estimates[t+1],\n smoothed_track[t+1])\n\n return smoothed_track",
"def update(self, detections):\n # Run matching cascade.\n matches, unmatched_tracks, unmatched_detections = \\\n self._match(detections)\n\n # Update track set.\n for track_idx, detection_idx in matches:\n self.tracks[track_idx].update(\n self.kf, detections[detection_idx])\n for track_idx in unmatched_tracks:\n self.tracks[track_idx].mark_missed()\n for detection_idx in unmatched_detections:\n self._initiate_track(detections[detection_idx])\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\n\n # Update distance metric.\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\n features, targets = [], []\n for track in self.tracks:\n if not track.is_confirmed():\n continue\n features += track.features\n targets += [track.track_id for _ in track.features]\n track.features = []\n self.metric.partial_fit(\n np.asarray(features), np.asarray(targets), active_targets)",
"def _track_attr(self):\n self._track_item['joint_pos'].append(self.joint_pos.copy())\n self._track_item['action'].append(self.action.copy())\n self._track_item['velocity'].append(self.sim.data.qvel[:6].copy())\n self._track_item['position'].append(self.sim.data.qpos[:3].copy())\n self._track_item['true_joint_pos'].append(self.sim.data.qpos[-self._num_joints:].copy())\n self._track_item['sensordata'].append(self.sim.data.sensordata.copy())\n self._track_item['qpos'].append(self.sim.data.qpos.copy())\n self._track_item['qvel'].append(self.sim.data.qvel.copy())\n ob = self._get_obs()\n self._track_item['achieved_goal'].append(ob['achieved_goal'].copy())\n self._track_item['observation'].append(ob['observation'].copy())\n self._track_item['desired_goal'].append(ob['desired_goal'].copy())\n self._track_item['omega_o'].append(self.omega.copy())\n self._track_item['omega'].append(self.w.copy())\n self._track_item['z'].append(self.z.copy())\n self._track_item['mu'].append(self.mu.copy())\n self._track_item['d1'].append(np.array([self.d1], dtype = np.float32))\n self._track_item['d2'].append(np.array([self.d2], dtype = np.float32))\n self._track_item['d3'].append(np.array([self.d3], dtype = np.float32))\n self._track_item['stability'].append(np.array([self.stability], dtype = np.float32))",
"def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],\n metric_time: datetime.datetime, *args, **kwargs):\n\n # Reward value\n config_metric = 0\n\n predicted_sensors = list()\n memo = {}\n # For each sensor in the configuration\n for sensor, actions in config.items():\n predicted_sensor = copy.deepcopy(sensor, memo)\n predicted_sensor.add_actions(actions)\n predicted_sensor.act(metric_time)\n if isinstance(sensor, Sensor):\n predicted_sensors.append(predicted_sensor) # checks if its a sensor\n\n # Create dictionary of predictions for the tracks in the configuration\n predicted_tracks = set()\n for track in tracks:\n predicted_track = copy.copy(track)\n predicted_track.append(self.predictor.predict(predicted_track, timestamp=metric_time))\n predicted_tracks.add(predicted_track)\n\n for sensor in predicted_sensors:\n\n # Assumes one detection per track\n detections = {detection.groundtruth_path: detection\n for detection in sensor.measure(predicted_tracks, noise=False)\n if isinstance(detection, TrueDetection)}\n\n for predicted_track, detection in detections.items():\n # Generate hypothesis based on prediction/previous update and detection\n hypothesis = SingleHypothesis(predicted_track.state, detection)\n\n # Do the update based on this hypothesis and store covariance matrix\n update = self.updater.update(hypothesis)\n\n previous_cov_norm = np.linalg.norm(predicted_track.covar)\n update_cov_norm = np.linalg.norm(update.covar)\n\n # Replace prediction with update\n predicted_track.append(update)\n\n # Calculate metric for the track observation and add to the metric\n # for the configuration\n metric = previous_cov_norm - update_cov_norm\n config_metric += metric\n\n if self.method_sum is False and len(detections) != 0:\n\n config_metric /= len(detections)\n\n # Return value of configuration metric\n return config_metric",
"def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])"
]
| [
"0.5544471",
"0.52727795",
"0.5136149",
"0.50760716",
"0.50091",
"0.49874175",
"0.49845898",
"0.4978564",
"0.49625763",
"0.49504185",
"0.49458835",
"0.49335563",
"0.49335563",
"0.49217388",
"0.49156517",
"0.49156517",
"0.49156517",
"0.49147886",
"0.4909424",
"0.4891846",
"0.48892817",
"0.488252",
"0.4870053",
"0.48624507",
"0.48550594",
"0.4854027",
"0.4849871",
"0.48478287",
"0.48446855",
"0.48364645"
]
| 0.6842899 | 0 |
Pass a new message onto the state estimator and update state of the Track to reflect this. | def store(self, new_msg, track_list):
self.state_estimator.store(new_msg, ts.TimeStamp(new_msg['h'], new_msg['m'], new_msg['s']))
self.received_measurement = True
self.n_consecutive_measurements += 1
self.n_consecutive_missed = 0
self.lane = new_msg['lane']
if self.served and self.sensor == DSRC.DSRC and new_msg['served'] == 0:
self.served = False
if self.state_estimator.fused_track:
for fused_track_id in self.fused_track_ids:
track_list[fused_track_id].served = False
if not self.served and self.sensor == DSRC.DSRC and new_msg['served'] == 1:
self.served = True
if self.state_estimator.fused_track:
for fused_track_id in self.fused_track_ids:
track_list[fused_track_id].served = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __setstate__(self, message):\n self._message = message",
"def handle_msg(self, state_id, msg):\n pass",
"def _state_message_received(self, msg: ReceiveMessage) -> None:\n try:\n self._state = int(msg.payload)\n self.async_write_ha_state()\n except ValueError:\n pass",
"def update(self, msg):\n pass",
"def handle_message(self, message):",
"def message(self, msg: AgentMessage):\n self._message = msg",
"def _sync(self, message=None):\n if message[0] == self._multi_level_sensor_property.element_uid:\n self._state = self._device_instance.multi_level_sensor_property[\n message[0]\n ].value\n elif message[0].startswith(\"hdm\"):\n self._available = self._device_instance.is_online()\n else:\n _LOGGER.debug(\"No valid message received: %s\", message)\n self.schedule_update_ha_state()",
"def on_message(self, json_state):\n global receiving_message\n receiving_message = True\n global current_state\n current_state = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_state)\n global n_messages\n\n if not n_messages: #first message ever\n new_state = self.initialize_state(current_state)\n else:\n new_state = self.on_state_change(current_state)\n\n n_messages += 1\n if new_state: #if you return a new state send it back\n receiving_message = False",
"def on_state_notification(self, data):\n\n self.channel_data.update(data)\n\n # synchronize DataManager data with processed update & entity data\n self.sync_data_update_ha()",
"def handle(self, message):",
"def _cmdCallback(self, cmdToTrack):\n state, textMsg, hubMsg = cmdToTrack.fullState\n self.setState(state, textMsg=textMsg, hubMsg=hubMsg)",
"def state_message_received(msg):\n self._last_image = msg.payload",
"def message(self, message):\n self._message = message",
"def message(self, message):\n self._message = message",
"def message(self, msg):\n self._message = msg",
"def update_message(self, message: str):\n self.message = message\n self.updated_time = datetime.now()",
"def update(self):\n path = os.path.expanduser('~') + '/.homeassistant'\n path += '/msg.txt'\n file_contain = open(path, 'r')\n if file_contain != '':\n message = file_contain.read()\n self._state = message\n file_contain.close()\n file_chance = open(path, 'w')\n file_chance.write('')\n file_chance.close()\n else:\n self._state = ''",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def message(self, message):\n\n self._message = message",
"def update_message(self, message_state: 'MessageState'):\n self._check_new_message_state(message_state)\n self.channel_state.message_state = message_state\n self._compute_amount_owner1_can_transfer_to_owner2()\n self._compute_amount_owner2_can_transfer_to_owner1()",
"def setState(self, newState, textMsg=\"\", hubMsg=\"\"):\n if self.isDone:\n raise RuntimeError(\"Command is done; cannot change state\")\n if newState not in self.StateSet:\n raise RuntimeError(\"Unknown state %s\" % newState)\n self.state = newState\n self._textMsg = str(textMsg)\n self._hubMsg = str(hubMsg)\n self._basicDoCallbacks(self)\n if self.isDone:\n self._removeAllCallbacks()\n self._cmdToTrack = None",
"def _handle_message(self, msg):\n self.event('message', msg)",
"def process(self, new_state, **args):\n self._state = new_state",
"def receive_message(self, message):\r\n self.state.receive_message(message)\r\n return"
]
| [
"0.69827133",
"0.65274626",
"0.6313561",
"0.6236883",
"0.62238866",
"0.6210473",
"0.6189199",
"0.6182856",
"0.61365014",
"0.6115923",
"0.61149055",
"0.60821867",
"0.60749793",
"0.60749793",
"0.605385",
"0.60528314",
"0.6038874",
"0.603857",
"0.603857",
"0.603857",
"0.603857",
"0.603857",
"0.603857",
"0.603857",
"0.603857",
"0.6029407",
"0.60116047",
"0.60003793",
"0.5989063",
"0.5987705"
]
| 0.6710784 | 1 |
Write the labels of a 2D grid to file | def write_grid2d(grid_file, grid2d):
with grid_file.open('w') as f:
for row in grid2d['label']:
f.write('\t'.join(row) + '\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))",
"def save_GRID( self , filename ):\n self._fwrite_GRID( filename )",
"def write_grid(self, file_path, fmt='%0.16g'):\n with open(file_path, 'w') as outfile:\n if self.grid.size == 3:\n outfile.write('{}\\t{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))\n else:\n outfile.write('{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n with open(file_path, 'ab') as outfile:\n numpy.savetxt(outfile, numpy.c_[self.grid[0]], fmt=fmt)\n numpy.savetxt(outfile, numpy.c_[self.grid[1]], fmt=fmt)\n if self.grid.size == 3:\n numpy.savetxt(outfile, numpy.c_[self.grid[2]], fmt=fmt)",
"def write_label(filename, label, verbose=None):\n\n with open(filename, 'wb') as fid:\n n_vertices = len(label.vertices)\n data = np.zeros((n_vertices, 5), dtype=np.float)\n data[:, 0] = label.vertices\n data[:, 1:4] = label.coords # self.pos #1e3 *\n data[:, 4] = label.values\n fid.write(b(\"#%s\\n\" % label.comment))\n fid.write(b(\"%d\\n\" % n_vertices))\n for d in data:\n fid.write(b(\"%d %f %f %f %f\\n\" % tuple(d)))\n return label",
"def save_to_file(grid, filepath):\n outfile = codecs.open(filepath, mode='w+', encoding='utf-8')\n outfile.writelines([((''.join(row)) + u'\\n') for row in grid])",
"def read_grid2d(grid_file):\n labels = []\n with grid_file.open('r') as f:\n for row in f.readlines():\n labels.append([x.strip() for x in row.split('\\t')])\n\n labels = array(labels)\n grid2d = make_grid(labels.shape[0], labels.shape[1])\n grid2d['label'] = labels\n return grid2d",
"def save_grid(fname, grid):\n\twith open((\"%sGridFix\" % fname), 'w') as file_handler:\n\t for item in grid:\n\t file_handler.write(\"{}\\n\".format(item))\n\t pass\n # Return the name of the file\n\treturn (\"%sGridFix\" % fname)",
"def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1",
"def write_ptm_gridfile(self,fn):\n vertex_hdr = \" Vertex Data: vertex_number, x, y\"\n poly_hdr = \" Polygon Data: polygon_number, number_of_sides,center_x, center_y, center_depth, side_indices(number_of_sides), marker(0=internal,1=open boundary)\"\n side_hdr = \" Side Data: side_number, side_depth, node_indices(2), cell_indices(2), marker(0=internal,1=external,2=flow boundary,3=open boundary)\"\n\n with open(fn,'wt') as fp:\n # write header counts\n fp.write(\" Number of Vertices\\n\")\n fp.write(\" %20d\\n\"%self.Nnodes())\n fp.write(\" Number of Polygons\\n\")\n fp.write(\" %20d\\n\"%self.Ncells())\n fp.write(\" Number of Sides\\n\")\n fp.write(\" %20d\\n\"%self.Nedges())\n fp.write(\" NODATA (land) value\\n\")\n fp.write(\" -9999.000000000\\n\")\n\n # write vertex info\n fp.write(vertex_hdr+\"\\n\")\n for v in range(self.Nnodes()):\n fp.write(\" %10d %16.7f %16.7f\\n\"%(v+1,\n self.nodes['x'][v,0],\n self.nodes['x'][v,1]))\n\n # write polygon info\n fp.write(poly_hdr+\"\\n\")\n cell_write_str1 = \" %10d %10d %16.7f %16.7f %16.7f \"\n cell_depths = self.cell_depths()\n for e in range(self.Ncells()):\n edges = self.cells['edges'][e,:]\n edges[edges<0] = -1\n edge_str = \" \".join( [\"%10d\"%(s+1) for s in edges] )\n edge_str = edge_str+\" %10d\\n\"%(self.cells['mark'][e])\n nsides = sum(edges>=0)\n fp.write(cell_write_str1%(e+1,\n nsides,\n self.cells['_center'][e,0],\n self.cells['_center'][e,1],\n cell_depths[e]))\n fp.write(edge_str)\n \n # write side info\n fp.write(side_hdr+\"\\n\")\n edge_depths = self.edge_depths()\n edge_write_str = \" %10d %16.7f %10d %10d %10d %10d %10d\\n\"\n for s in range(self.Nedges()):\n edges = self.edges['cells'][s,:]\n edges[edges<0] = -1 \n nodes = self.edges['nodes'][s,:]\n nodes[nodes<0] = -1\n fp.write(edge_write_str%(s+1,\n edge_depths[s],\n nodes[0]+1,\n nodes[1]+1,\n edges[0]+1,\n edges[1]+1,\n self.edges['mark'][s]))",
"def to_cdo_grid(self, outfile):",
"def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})",
"def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))",
"def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))",
"def SaveLabels(filepath, labels):\n # 1) Create a string with all the text to be stored\n text = '\\n'.join(labels)\n\n # 2) Open the datafile and save the text\n with open(filepath, 'w') as outfile:\n outfile.write(text)",
"def write_label_file(labels_to_class_names, labels_filename):\n with tf.gfile.Open(labels_filename, \"w\") as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n'%(label, class_name))",
"def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')",
"def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True",
"def save_pattern_file(file_path: str, grid_pattern: np.ndarray):\n\n # Transform the grid into a list of string lines\n lines = []\n for row in range(len(grid_pattern)):\n line = [\".\" if cell == 0 else \"O\" for cell in grid_pattern[row]]\n line_str = \"\".join(line) + \"\\n\"\n lines.append(line_str)\n\n with open(file_path, mode=\"w\") as f:\n f.writelines(lines)",
"def save_world(world, filename):\n with open(filename, \"w\") as f:\n for i, row in enumerate(world):\n if i % 2 != 0 and i not in [0, 1]:\n f.write(\" \")\n\n for column in row:\n f.write(str(column))\n f.write(\" \")\n\n f.write(\"\\n\")",
"def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return",
"def write( self, NewFilename='', Integer=True ):\n try:\n if NewFilename != '':\n self.name=NewFilename\n Output = open( self.name, 'w' )\n Output.write( 'ncols\\t\\t %d\\n' % self.ncols )\n Output.write( 'nrows\\t\\t %d\\n' % self.nrows )\n Output.write( 'xllcorner\\t\\t %f\\n' % self.xllcorner)\n Output.write( 'yllcorner\\t\\t %f\\n' % self.yllcorner)\n Output.write( 'cellsize\\t\\t %f\\n' % self.cellsize)\n if Integer:\n Output.write( 'NODATA_value\\t\\t %d\\n' % int(self.nodata) )\n else:\n Output.write( 'NODATA_value\\t\\t %f\\n' % self.nodata )\n for row in range( self.nrows-1,-1,-1 ):\n record = []\n for col in range( self.ncols ):\n if Integer:\n record.append( str( int( round( self.data[row,col]) ) ) )\n else:\n record.append( str(self.data[row,col]) )\n Output.write( string.join(record, ' ')+'\\n' )\n Output.close()\n except:\n print \"Error writing grid ::\", self.name",
"def write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):\n labels_filename = os.path.join(dataset_dir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))",
"def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)",
"def write_AsciiGrid(fname, data, info, fmt='%.18e'):\n import numpy as np\n\n # replace nans with nodatavalue according to info\n nodata = int(info[-1].split(' ')[-1])\n data[np.isnan(data)] = nodata\n # write info\n fid = open(fname, 'w')\n fid.writelines(info)\n fid.close()\n\n # write data\n fid = open(fname, 'a')\n np.savetxt(fid, data, fmt=fmt, delimiter=' ')\n fid.close()",
"def save_grd(filename, meta, map):\n if os.path.exists(filename):\n raise ValueError(\"File already exists: {}\".format(filename))\n if map.shape != (meta['NX'], meta['NY'], meta['NCOMP']):\n raise ValueError(\"The map shape does not match the metadata dictionary.\")\n points = meta['NX'] * meta['NY']\n components = meta['NCOMP']\n data = np.empty((points, 2 * components))\n for component in range(components):\n data[:, 2 * component] = map[:, :, component].reshape(points, order='F').real\n data[:, 2 * component + 1] = map[:, :, component].reshape(points, order='F').imag\n with open(filename, 'w') as f:\n for line in meta['header']:\n f.write('{}\\n'.format(line))\n f.write('{:2d}\\n'.format(meta['KTYPE']))\n f.write('{:12d}{:12d}{:12d}{:12d}\\n'.format(meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID']))\n f.write('{:12d}{:12d}\\n'.format(meta['IX'], meta['IY']))\n f.write(' {: 0.10E} {: 0.10E} {: 0.10E} {: 0.10E}\\n'.format(meta['XS'], meta['YS'], meta['XE'], meta['YE']))\n f.write('{:12d}{:12d}{:12d}\\n'.format(meta['NX'], meta['NY'], meta['KLIMIT']))\n for p in range(points):\n f.write(''.join([float_to_string(number) for number in data[p, :]]) + '\\n')",
"def save_labels_to_disk(labels: list, label_path: str):\n\n with open(label_path, \"w\") as result_file:\n wr = csv.writer(result_file, dialect=\"excel\")\n wr.writerows(labels)",
"def write_labels_file(labels_to_class_names, dataset_dir,\n filename='labels.txt'):\n labels_path = os.path.join(dataset_dir, filename)\n with open(labels_path, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))",
"def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))",
"def writeGP(loc, fname, data, header, ncol=6):\n size = len(data)\n nrow = int(size / ncol)\n size_last_row = size % ncol\n\n lines = \"\"\n for line in np.reshape(range(nrow * ncol), (nrow, ncol)):\n for val in line:\n lines += \"{:^20.6e}\".format(data[val]) + 3 * \" \"\n lines = lines.rstrip(3 * \" \") + \"\\n\"\n\n if size_last_row:\n for i in range(1, size_last_row + 1):\n lines += \"{:^20.6e}\".format(data[-i]) + 3 * \" \"\n lines = lines.rstrip(3 * \" \")\n\n with open(\"/\".join([loc, fname]), \"w\") as f:\n f.writelines(header)\n f.writelines(lines)\n return",
"def write_towhee_coord(self, filename):\n with open(filename, 'w') as f:\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)"
]
| [
"0.71003115",
"0.6925979",
"0.6832347",
"0.67913246",
"0.66992986",
"0.65994614",
"0.6515918",
"0.6390906",
"0.63335294",
"0.63307697",
"0.62936795",
"0.61924064",
"0.61924064",
"0.6182035",
"0.61560845",
"0.6152925",
"0.61473215",
"0.6145884",
"0.6100551",
"0.60703844",
"0.60657746",
"0.60568655",
"0.60386235",
"0.6006115",
"0.59808964",
"0.5961127",
"0.59508765",
"0.59367144",
"0.59079516",
"0.589126"
]
| 0.8044423 | 0 |
Read the labels of a 2D grid from a file | def read_grid2d(grid_file):
labels = []
with grid_file.open('r') as f:
for row in f.readlines():
labels.append([x.strip() for x in row.split('\t')])
labels = array(labels)
grid2d = make_grid(labels.shape[0], labels.shape[1])
grid2d['label'] = labels
return grid2d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_grid2d(grid_file, grid2d):\n with grid_file.open('w') as f:\n for row in grid2d['label']:\n f.write('\\t'.join(row) + '\\n')",
"def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label",
"def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))",
"def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data",
"def get_gridinfo(filename, xpoints, ypoints):\n with open(f\"{filename[:-4]}.wld\", encoding=\"ascii\") as fh:\n lines = fh.readlines()\n dx = float(lines[0])\n dy = float(lines[3])\n west = float(lines[4])\n north = float(lines[5])\n south = north + dy * ypoints\n lats = np.arange(0, ypoints) * (0 - dy) + south\n lons = np.arange(0, xpoints) * dx + west\n return lons, lats",
"def read_ecog2d(ecog_file, grid_file):\n ecog = loadtxt(ecog_file, delimiter='\\t')\n\n ecog_on_grid = zeros(ecog.shape, dtype=DTYPE_ECOG)\n ecog_on_grid['value'] = ecog\n ecog_on_grid['good'] = ~isnan(ecog)\n ecog_on_grid['label'] = read_grid2d(grid_file)['label']\n\n return ecog_on_grid",
"def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)",
"def save_to_file(grid, filepath):\n outfile = codecs.open(filepath, mode='w+', encoding='utf-8')\n outfile.writelines([((''.join(row)) + u'\\n') for row in grid])",
"def save_GRID( self , filename ):\n self._fwrite_GRID( filename )",
"def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels",
"def read_grid(filename):\r\n with open(filename) as infile:\r\n lines = infile.read().splitlines()\r\n\r\n grid = [[int(bit) for bit in line.split()] for line in lines]\r\n return grid",
"def read_grid(filename):\r\n with open(filename) as infile:\r\n lines = infile.read().splitlines()\r\n\r\n grid = [[int(bit) for bit in line.split()] for line in lines]\r\n return grid",
"def readPermeability(self, fname, label=(\"$\\kappa_x$\", \"$\\kappa_y$\", \"$\\kappa_z$\")):\r\n k = np.loadtxt(fname, comments=\"#\")\r\n nr, nc = np.shape(k)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n # Sutra and VTK use opposite ordering\r\n k = np.reshape(k, (self.nx - 1, self.ny - 1, self.nz - 1, np.shape(k)[1]))\r\n k = np.reshape(k, (nr, nc), order='F')\r\n kx = vtk.vtkDoubleArray()\r\n kx.SetName(label[0])\r\n ky = vtk.vtkDoubleArray()\r\n ky.SetName(label[1])\r\n kz = vtk.vtkDoubleArray()\r\n kz.SetName(label[2])\r\n for ik, K in enumerate(k):\r\n kx.InsertNextTuple1(K[2])\r\n ky.InsertNextTuple1(K[3])\r\n kz.InsertNextTuple1(K[4])\r\n self.Grid.GetCellData().AddArray(kx)\r\n self.Grid.GetCellData().AddArray(ky)\r\n self.Grid.GetCellData().AddArray(kz)",
"def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1",
"def write_label(filename, label, verbose=None):\n\n with open(filename, 'wb') as fid:\n n_vertices = len(label.vertices)\n data = np.zeros((n_vertices, 5), dtype=np.float)\n data[:, 0] = label.vertices\n data[:, 1:4] = label.coords # self.pos #1e3 *\n data[:, 4] = label.values\n fid.write(b(\"#%s\\n\" % label.comment))\n fid.write(b(\"%d\\n\" % n_vertices))\n for d in data:\n fid.write(b(\"%d %f %f %f %f\\n\" % tuple(d)))\n return label",
"def save_grid(fname, grid):\n\twith open((\"%sGridFix\" % fname), 'w') as file_handler:\n\t for item in grid:\n\t file_handler.write(\"{}\\n\".format(item))\n\t pass\n # Return the name of the file\n\treturn (\"%sGridFix\" % fname)",
"def SaveLabels(filepath, labels):\n # 1) Create a string with all the text to be stored\n text = '\\n'.join(labels)\n\n # 2) Open the datafile and save the text\n with open(filepath, 'w') as outfile:\n outfile.write(text)",
"def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index",
"def read_grid(self, file_path=None):\n print('[info] reading the grid ...')\n if not file_path:\n file_path = os.path.join(self.directory, 'grid.dat')\n if not os.path.exists(file_path):\n file_path = os.path.join(self.directory, 'grid.txt')\n # test if file written in binary format\n textchars = bytearray({7, 8, 9, 10, 12, 13, 27}\n | set(range(0x20, 0x100)) - {0x7f})\n is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))\n infile = open(file_path, 'rb')\n binary_format = is_binary_string(infile.read(1024))\n infile.close()\n if binary_format:\n with open(file_path, 'rb') as infile:\n # x-direction\n nx = struct.unpack('i', infile.read(4))[0]\n x = numpy.array(struct.unpack('d' * (nx + 1),\n infile.read(8 * (nx + 1))))\n # y-direction\n ny = struct.unpack('i', infile.read(4))[0]\n y = numpy.array(struct.unpack('d' * (ny + 1),\n infile.read(8 * (ny + 1))))\n self.grid = numpy.array([x, y])\n else:\n with open(file_path, 'r') as infile:\n n_cells = numpy.array([int(n)\n for n in infile.readline().strip().split()])\n coords = numpy.loadtxt(infile, dtype=numpy.float64)\n self.grid = numpy.array(numpy.split(coords,\n numpy.cumsum(n_cells[:-1] + 1)))\n if self.grid.size == 2:\n print('\\tgrid-size: {}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n elif self.grid.size == 3:\n print('\\tgrid-size: {}x{}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))",
"def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map",
"def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]",
"def read_label_data(mode, image_type):\n return np.loadtxt(parse_path(mode, image_type, True), dtype=int, delimiter='\\n')",
"def retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels",
"def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels",
"def load_data_and_labels(data_file, labels_file):\r\n x_text = []\r\n y = []\r\n \r\n with open(data_file, encoding = \"utf-8\") as csvFile:\r\n readCSV = csv.reader(csvFile, delimiter = \",\")\r\n for row in readCSV:\r\n row = \"\".join(row)\r\n x_text.append(row) \r\n \r\n with open(labels_file, encoding = \"utf-8\") as csvFile2:\r\n readCSV = csv.reader(csvFile2, delimiter = \",\")\r\n for row in readCSV:\r\n d = defaultdict(list)\r\n for k,va in [(v,i) for i,v in enumerate(row)]:\r\n d[k].append(va)\r\n \r\n for k in range(len(d.get(\"1.0\"))):\r\n index = d.get(\"1.0\")[k]\r\n row[index] = 1\r\n for k in range(len(d.get(\"0.0\"))):\r\n index = d.get(\"0.0\")[k]\r\n row[index] = 0\r\n \r\n# print(len(row))\r\n y.append(row)\r\n \r\n\r\n\r\n\r\n \r\n print(\"x = {}\".format(len(x_text)))\r\n print(\"y = {}\".format(len(y)))\r\n \r\n return x_text, y",
"def load_grid(dico, options):\n grid = []\n error = False\n if options.file:\n file_name = options.file\n error = True\n else:\n file_name = 'data/puzzle-{}-1.txt'.format(str(dico[\"size\"]))\n try:\n fd = open(file_name, 'r+')\n except:\n if error is False:\n print(\"Infos file does not match the grid.\")\n elif error is True:\n print(\"Look like we can't find '{}'\".format(file_name))\n exit(1)\n i = 0\n for row in fd:\n if i == 0:\n i += 1\n else:\n grid.append(row.replace('\\n', ''))\n return grid",
"def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)",
"def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels",
"def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z",
"def read_labels(label_path, label_type, calib_path=None, is_velo_cam=False, proj_velo=None):\n if label_type == \"txt\": #TODO\n places, size, rotates = read_label_from_txt(label_path)\n if places is None:\n return None, None, None\n rotates = np.pi / 2 - rotates\n dummy = np.zeros_like(places)\n dummy = places.copy()\n if calib_path:\n places = np.dot(dummy, proj_velo.transpose())[:, :3]\n else:\n places = dummy\n if is_velo_cam:\n places[:, 0] += 0.27\n\n elif label_type == \"xml\":\n bounding_boxes, size = read_label_from_xml(label_path)\n places = bounding_boxes[30][\"place\"]\n rotates = bounding_boxes[30][\"rotate\"][:, 2]\n size = bounding_boxes[30][\"size\"]\n\n return places, rotates, size"
]
| [
"0.72920907",
"0.64701855",
"0.6386603",
"0.6208433",
"0.6121841",
"0.6070264",
"0.6020945",
"0.6020452",
"0.5981528",
"0.5965864",
"0.5960582",
"0.5960582",
"0.59508276",
"0.5949805",
"0.5941208",
"0.59156436",
"0.5911893",
"0.589569",
"0.5853387",
"0.58449346",
"0.58364975",
"0.5833189",
"0.58166397",
"0.58063567",
"0.58053553",
"0.58048654",
"0.5786972",
"0.5784949",
"0.5783814",
"0.5759062"
]
| 0.75627935 | 0 |
Write the values of ECoG analysis to file | def write_ecog2d(ecog_file, ecog2d):
savetxt(ecog_file, ecog2d['value'], fmt='%.8f', delimiter='\t') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveenergyfile(path, meta, data):\n def serializemeta(meta):\n \"\"\"Convert metadata object to list of comment strings\"\"\"\n return [u\"#CTE_%s: %s\" % (key, meta[key]) for key in meta]\n\n with io.open(path, 'w+') as ff:\n ff.write(u\"\\n\".join(serializemeta(meta)))\n ff.write(u\"\\nvector,tipo,src_dst\\n\")\n for c in data:\n carrier = c['carrier']\n ctype = c['ctype']\n originoruse = c['originoruse']\n values = u\", \".join(u\"%.2f\" % v for v in c['values'])\n comment = u\" # %s\" % c['comment'] if c['comment'] else u\"\"\n ff.write(u\"%s, %s, %s, %s%s\\n\" % (carrier, ctype, originoruse, values, comment))",
"def saveeigenvalues(eigenvalue, fvalue, lvalue):\n eigenval = []\n for ii in range(fvalue, lvalue+1):\n test = eigenvalue[ii]\n eigenval.append(test)\n np.savetxt(\"energies.dat\", eigenval, fmt='%s')",
"def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()",
"def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)",
"def store_regain_values(filename,gain_vals,gain_comment=\"\"):\n f = open(filename,\"w\")\n f.write(\"#Gain values calculated by Echidna reduction routine\\n\")\n f.write(\"#\"+gain_comment+\"\\n\")\n for pos,gain in zip(range(len(gain_vals)),gain_vals):\n f.write(\"%d %8.3f\\n\" % (pos,gain))\n f.close()",
"def write_ogse(args):\n if args.ref_diode:\n add_ogse_ref_diode(args.ogse_dir / DB_REF_DIODE, args.l1a_file)\n\n if args.avantes:\n add_ogse_wav_mon(args.ogse_dir / DB_WAV_MON, args.l1a_file)\n\n if args.helios:\n xds = helios_spectrum()\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/ReferenceSpectrum')\n\n if args.grande:\n xds = gsfc_polarizer()\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/SpectralDolP')\n for n_lamps in (1, 2, 3, 5, 9):\n if args.l1a_file.name.find(f'-L{n_lamps:1d}_') > 0:\n xds = grande_spectrum(n_lamps)\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/ReferenceSpectrum')\n break\n\n if args.opo_laser:\n target_cwl = args.l1a_file.stem.split('_')[2].split('-')[-1]\n xds = read_gse_excel(args.ogse_dir, target_cwl)\n if xds is not None:\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/OPO_laser')",
"def write_to_file(self, time):\n if Parameters.instance().use_ages:\n nb_age_groups = len(Parameters.instance().age_proportions)\n else:\n nb_age_groups = 1\n if Parameters.instance().use_ages:\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n data = {s: 0 for s in list(InfectionStatus)}\n for inf_status in data:\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n # Age groups are numbered from 1 to the total number\n # of age groups (thus the +1):\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n for inf_status in list(InfectionStatus):\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n self.writer.write(data)\n else: # If age not considered, age_group not written in csv\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n data = {s: 0 for s in list(InfectionStatus)}\n for k in data:\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for k in data:\n # Sum across age compartments\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n self.writer.write(data)",
"def save_enu(self, filename):\n x, y, z = self.get_coords_enu()\n coords = np.vstack([x, y, z]).T\n np.savetxt(filename, coords, fmt=b'%.12e')",
"def saveexpvalues(erw, erwq):\n erwartung = np.zeros(len(erw))\n erwartungquad = np.zeros(len(erwq))\n for ii in range(0, len(erw)):\n erwartung[ii] = erw[ii]\n erwartungquad[ii] = erwq[ii]\n np.savetxt(\"expvalues.dat\", np.transpose([erwartung, erwartungquad]), fmt='%s')",
"def write_e(self, outpath):\n\n if not self.tokens:\n raise Exception(\"MLE model not yet trained\")\n\n word_counts = collections.Counter([word_tag[WORD_INDEX] for word_tag in self.tokens])\n # Count and format word tag pairs with out unks\n e_tokens = [(token[WORD_INDEX], token[TAG_INDEX]) for token in self.tokens]\n e_counts = dict(collections.Counter(e_tokens))\n formatted_counts = [k[WORD_INDEX] + SPACE + k[TAG_INDEX] + TAB + str(e_counts[k]) for k in e_counts]\n output = NEW_LINE.join(formatted_counts)\n write(outpath, output)",
"def PrintOutput(self):\n self.file_settings[\"file_name\"].SetString(self.file_name)\n file = TimeBasedAsciiFileWriterUtility(self.model_part, self.file_settings, self._GetHeader()).file\n for point, var_values in zip(self.found_positions, self.values):\n file.write(self._DataToString(point, var_values))\n file.close()",
"def saveeigenvaluestest(eigenValue, fvalue, lvalue, path):\n chEigen = []\n for ii in range(fvalue, lvalue+1):\n test = eigenValue[ii]\n chEigen.append(test)\n np.savetxt(path, chEigen, fmt='%s')",
"def writeEcs( self ):\n\n self.logger.info( 'writeEcs: START' )\n\n # Generate inserts for ecs table.\n self.importerEc.writeEcs()\n\n self.logger.info( 'writeEcs: DONE' )",
"def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()",
"def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")",
"def save_file(E_Filtered, output_path):\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n with open(output_path, 'w+') as f:\n for k, v in E_Filtered.items():\n f.write(\"%s\\t%s\\n\" % (list(k), v))",
"def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)",
"def write(self, filename):\n f = open(filename, 'w')\n f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed",
"def write_gct_file(output_file, class_names, class_counts, expression_matrix):\n total_genes = len(expression_matrix)\n first_key = list(expression_matrix.keys())[0]\n total_samples = len(expression_matrix[first_key])\n\n headers = ['NAME', 'DESCRIPTION']\n\n for c_name, c_count in zip(class_names, class_counts):\n for i in range(c_count):\n headers.append('{}_{}'.format(c_name, i + 1))\n\n with open(output_file, 'w') as f:\n f.write('#1.2\\n')\n f.write('{} {}\\n'.format(total_genes, total_samples))\n f.write('\\t'.join(headers))\n f.write('\\n')\n\n for g_name, values in expression_matrix.items():\n f.write(g_name)\n f.write('\\tna\\t')\n f.write('\\t'.join(\n ['{0:.2f}'.format(v) for v in values]\n ))\n f.write('\\n')",
"def write_data(values, cols, dataset):\n file = open(WRITEPATH+dataset, \"w\")\n weka = open(WEKAPATH+dataset[0:len(dataset)-3]+\"arff\", \"w\")\n\n weka.write(\"@relation emotion\\n\")\n weka.write(\"\\n\")\n\n\n for i in range(len(cols)-1):\n weka.write(\"@attribute \" + cols[i] + \" numeric\\n\")\n file.write(cols[i]+\",\")\n\n weka.write(\"@attribute \" + cols[-1] + \"{Positive,Negative,Neutral}\\n\")\n file.write(cols[-1]+\"\\n\")\n\n weka.write(\"\\n@data\\n\")\n\n for v in values:\n l = np.sum(v[0:len(v)-1])\n if l != 0:\n for i in range(len(v)-1):\n weka.write(str(v[i]) + \",\")\n file.write(str(v[i]) + \",\")\n weka.write(str(v[-1]) + \"\\n\")\n file.write(str(v[-1]) + \"\\n\")\n file.close()\n weka.close()",
"def writexyz(self,fname):\n xyzfile = open(fname + \".xyz\",\"a+\")\n xyzfile.write(str(self.natoms) + \"\\n\\n\")\n for a in self.atoms:\n \tcxyz = a.xyz - np.array(self.pbc_correction(a.xyz))\n\t\t\txyzfile.write(str(a.type) + \"\\t\" + str(cxyz[0]) + \"\\t\" + str(cxyz[1]) + \"\\t\" + str(cxyz[2]) + \"\\n\")\n xyzfile.close()",
"def save_EGRID( self , filename , output_unit = EclUnitTypeEnum.ERT_ECL_METRIC_UNITS):\n self._fwrite_EGRID2( filename, output_unit )",
"def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')",
"def write_output(self, failed_genes):\r\n file_prefix = self.file_name.strip('sambamba_output.txt')\r\n fieldnames = ['GeneSymbol;Accession', 'percentage30']\r\n with open (f'../results/{file_prefix}.coverage_output.csv', 'w', newline = '') as output:\r\n csvwriter = csv.DictWriter(output, fieldnames=fieldnames)\r\n csvwriter.writeheader()\r\n csvwriter.writerows(failed_genes)",
"def save_band_for_path(self, path, filename):\n with open(filename, 'ab') as file_hander:\n for point in path:\n energies = self.problem.energy_eigenvalues(point[0], point[1])\n np.savetxt(file_hander, energies)",
"def writeout(self):\n out_file = ''.join(['theta_w_t', str(self.t), '.dat'])\n data_list = [] \n\n for i in xrange(self.n_params): \n data_list.append( self.theta_t[i,:] ) \n\n data_list.append(self.w_t)\n\n np.savetxt(\n out_file, \n (np.vstack(np.array(data_list))).T, \n delimiter='\\t'\n )\n\n return None",
"def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()",
"def save(self, exp_file, gat_file):\n\t\tto_save = np.stack((self.b, self.sigma)) #(2, K)\n\t\tto_save = np.concatenate((self.W,to_save) , axis = 0) #(D+2,K)\n\t\tnp.savetxt(exp_file, to_save)\n\t\tself.gating.save(gat_file)\n\t\treturn",
"def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")",
"def write_result(self, file_path):\n f = open(file_path, \"a\")\n f.write(\"{}\\t{}\\n\".format(*[self.name, str(self.ROC_AUC_value)]))\n f.close()"
]
| [
"0.6469135",
"0.64325714",
"0.63181114",
"0.62568504",
"0.6206715",
"0.6175021",
"0.6094979",
"0.6085525",
"0.6083914",
"0.60705334",
"0.6051837",
"0.6035098",
"0.60194105",
"0.59780884",
"0.5972372",
"0.59563106",
"0.59435415",
"0.59313726",
"0.5927502",
"0.59157795",
"0.5903241",
"0.587397",
"0.5866182",
"0.58334893",
"0.58319664",
"0.58305633",
"0.5825283",
"0.5818016",
"0.5807767",
"0.5789186"
]
| 0.64328045 | 1 |
Write electrode positions to a TSV file | def write_tsv(labels, positions, elec_file):
labels = labels.reshape(-1, order='F')
positions = positions.reshape(-1, 3, order='F')
elec_file = elec_file.with_suffix('.tsv')
with elec_file.open('w') as f:
f.write('name\tx\ty\tz\n')
for i in range(labels.shape[0]):
f.write(f'{labels[i]}\t{positions[i, 0]:.3f}\t{positions[i, 1]:.3f}\t{positions[i, 2]:.3f}\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_towhee_coord(self, filename):\n with open(filename, 'w') as f:\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)",
"def write_tsv(self, filename):\n f = open(filename,'wb')\n wr = csv.writer(f,delimiter='\\t',quoting=csv.QUOTE_ALL)\n colrow = []\n for col in self.cols:\n colrow.append('<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape'))\n wr.writerow(colrow)\n for row in self.data:\n strrow = []\n for cell in row:\n strrow.append('' if cell is None else unicode(cell).encode('unicode-escape'))\n wr.writerow(strrow)\n f.close()",
"def write_position(seq_array, tau, emission):\n file = open(\"motif_position.txt\", \"w\")\n for i in range(len(seq_array)):\n v_mat, t_mat = mf.viterbi(seq_array[i], tau, emission)\n viterbi_seq = mf.trace_viterbi(v_mat, t_mat)\n file.write(str(viterbi_seq.find(mf.MOTIF)))\n file.write(\"\\n\")\n file.close()",
"def sauvegarder():\n\n fic = open(\"sauvegarde.txt\", \"w\")\n\n for i in range(Nombre_de_colonne):\n\n for j in range(Nombre_de_ligne):\n\n fic.write(str(etat[i][j]) + \"\\n\")\n\n fic.close()",
"def write_tsv_fast(self, filename):\n # TODO (without quotation marks)\n with open(filename, 'wb') as f:\n colnames = ['<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape') for col in self.cols]\n f.write('\\t'.join(colnames)+'\\n')\n for row in self.data:\n f.write('\\t'.join(['' if cell is None else unicode(cell).encode('unicode-escape') for cell in row])+'\\n')",
"def writeVelocityPlot(self):\n name = \"velocity.vtk\"\n chargeFile = open(name,'w')\n chargeFile.write(\"%s\\n\"%(\"# vtk DataFile Version 2.0\"))\n chargeFile.write(\"%s\\n\"%(\"obtained via hydraulicmodule\"))\n chargeFile.write(\"%s\\n\"%(\"ASCII\"))\n chargeFile.write(\"%s\\n\"%(\"DATASET UNSTRUCTURED_GRID\"))\n chargeFile.write(\"%s %i %s\\n\"%(\"POINTS\",len(self.points),\"double\"))\n dim = self.mesh.getSpaceDimensions()\n if (dim==2): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n 0.))\n pass\n pass\n elif (dim==3): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n self.points[ind][2]))\n pass\n pass\n else:\n raise Exception(\" error in mesh dimension \") \n numberOfCells = self.mesh.getNumberOfCells()\n connectivity = self.mesh.getConnectivity()\n\n cellListSize = 0\n for i in range(0,numberOfCells): # gmsh meshes: type of elements\n gmshType = connectivity[i][1]\n if gmshType == 1: # 2-node line\n cellListSize += 3\n pass\n elif gmshType == 2: # 3-node triangles\n cellListSize += 4\n pass\n elif gmshType == 3: # 4-node quadrangles\n cellListSize += 5\n pass\n elif gmshType == 4: # 4-node tetrahedron\n cellListSize += 5\n pass\n elif gmshType == 5: # 8-node hexahedrons\n cellListSize += 9\n pass\n pass\n chargeFile.write(\"CELLS %i %i\\n\"%(numberOfCells,cellListSize))\n ind = 0\n for cell in connectivity:\n ind = cell[2]+3\n# print \" ctm dbg cell \",vtkTyp,ind,cell,\" perm \",permutation[ind],permutation[ind+1],permutation[ind+2],permutation[ind+3]\n # \n vtkTyp = _vtkGmsh(cell[1])\n if (vtkTyp==3): # 2-node line\n ind = cell[2]+3\n chargeFile.write(\"%i %i %i\\n\"%(\n 2,\\\n cell[ind]-1,\\\n cell[ind+1]-1)\n )\n pass\n \n elif (vtkTyp==5): # triangles\n chargeFile.write(\"%i %i %i %i\\n\"%(\n 3, \n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1)\n )\n pass\n elif (vtkTyp==9): # quadr\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==10): # tetra\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==12): # hexahedron\n chargeFile.write(\"%i %i %i %i %i %i %i %i %i\\n\"%(\n 8,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1,\\\n cell[ind+4]-1,\\\n cell[ind+5]-1,\\\n cell[ind+6]-1,\\\n cell[ind+7]-1)\n )\n pass\n pass\n chargeFile.write(\"%s %i\\n\"%(\"CELL_TYPES\",numberOfCells))\n#\n for i in range(0,numberOfCells):\n gmshType = connectivity[i][1]\n\n if (gmshType)==1:\n cellTyp = 3\n pass\n elif (gmshType)==2:\n cellTyp = 5\n pass\n elif (gmshType)==3:\n cellTyp = 9\n pass\n elif (gmshType)==4:\n cellTyp = 10\n pass\n elif (gmshType)==5:\n cellTyp = 12\n pass\n elif (gmshType)==6:\n cellTyp = 13\n pass\n elif gmshType == 7:\n cellTyp = 14\n pass\n else:\n raise Exception(\" check gmshtype \")\n chargeFile.write(\"%i\\n\"%(cellTyp))\n chargeFile.write(\"%s %d\\n\"%(\"POINT_DATA\",len(self.points)))\n chargeFile.write(\"%s\\n\"%(\"VECTORS vectors float\"))\n for velocityComponent in self.velocity:\n chargeFile.write(\" %e %e %e\\n \"%(velocityComponent[0], velocityComponent[1], velocityComponent[2]))\n chargeFile.write(\"%s\\n\"%(\"SCALARS charge double\"))\n chargeFile.write(\"%s\\n\"%(\"LOOKUP_TABLE default\"))\n#\n \n chargeDataFile=open(\"./\" + 
self.flowComponent.meshDirectoryName + \"/\" + \"HeVel.dat\",'r')\n line = chargeDataFile.readline()\n while \"Number Of Nodes\" not in line:\n line = chargeDataFile.readline()\n#line.split()\n nodesNumber = line.split()[-1]\n while \"Perm\" not in line:\n line = chargeDataFile.readline()\n#\n# We read the permutation\n#\n for i in range(int(nodesNumber)): chargeDataFile.readline()\n#\n# We read the charge\n#\n for i in range(int(nodesNumber)): chargeFile.write(\" %15.10e\\n \"%(float(chargeDataFile.readline())))",
"def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')",
"def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')",
"def coord_file(pybel_group, dihed, nonH, energy, name):\n csv_name = \"coords\" + name + \".csv\"\n #Open file, create writer\n f = open(csv_name, \"w\")\n wr = csv.writer(f)\n #Generate coords and write them\n for py_molec in pybel_group:\n wr.writerow(vector(py_molec, dihed, nonH, energy))\n f.close()\n return csv_name",
"def save_enu(self, filename):\n x, y, z = self.get_coords_enu()\n coords = np.vstack([x, y, z]).T\n np.savetxt(filename, coords, fmt=b'%.12e')",
"def _writeVTKOutput(self):\n\n sigma = numpy.ones((self.numStations, 3), dtype=numpy.float64)\n sigma[:, 0] *= self.sigmaEast\n sigma[:, 1] *= self.sigmaNorth\n sigma[:, 2] *= self.sigmaUp\n\n vtkHead = \"# vtk DataFile Version 2.0\\n\" + \\\n \"Synthetic GPS stations\\n\" + \\\n \"ASCII\\n\" + \\\n \"DATASET POLYDATA\\n\" + \\\n \"POINTS \" + repr(self.numStations) + \" double\\n\"\n\n v = open(self.vtkOutputFile, 'w')\n v.write(vtkHead)\n numpy.savetxt(v, self.coords)\n\n numConnect = 2 * self.numStations\n connectHead = \"VERTICES %d %d\\n\" % (self.numStations, numConnect)\n v.write(connectHead)\n verts = numpy.arange(self.numStations, dtype=numpy.int64)\n sizes = numpy.ones_like(verts)\n outConnect = numpy.column_stack((sizes, verts))\n numpy.savetxt(v, outConnect, fmt=\"%d\")\n \n dispHead = \"POINT_DATA \" + repr(self.numStations) + \"\\n\" + \\\n \"VECTORS displacement double\\n\"\n v.write(dispHead)\n numpy.savetxt(v, self.dispNoise)\n\n sigHead = \"VECTORS uncertainty double\\n\"\n v.write(sigHead)\n numpy.savetxt(v, sigma)\n v.close()\n \n return",
"def write_tcv(self):\n suffix = '_'+str(self.shot)+'_'+str(int(self.t*1e3))\n self.write_input(suffix=suffix)",
"def savepos(self):\n self.out.write(self.csi + \"s\")",
"def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1",
"def write_torque_table(A, filename):\n f = open(filename, 'w')\n for row in range(np.size(A, axis=0)):\n A[row,:].tofile(f, sep=',')\n f.write('\\n')\n f.close()",
"def dump_traza(self, fichero='traza.txt'):\n fichero = open(fichero, 'w', encoding=\"utf-8\")\n for punto in self.trazado:\n fichero.write(\"{},{}\\n\".format(punto.x, punto.y))\n fichero.close()",
"def write_pos(sposcar,ngrid,nspecies,filename):\n pos=np.dot(sposcar[\"lattvec\"],sposcar[\"positions\"])\n ntot=ngrid[0]*ngrid[1]*ngrid[2]*nspecies\n np_icell=np.empty((3,ntot),dtype=np.intc)\n car=pos\n np_ispecies=np.empty(ntot,dtype=np.intc)\n icell=np_icell\n ispecies=np_ispecies\n\n f=StringIO.StringIO()\n\n for ii in xrange(ntot):\n tmp,ispecies[ii]=divmod(ii,nspecies)\n tmp,icell[0,ii]=divmod(tmp,ngrid[0])\n icell[2,ii],icell[1,ii]=divmod(tmp,ngrid[1])\n car[0,ii],car[1,ii],car[2,ii]=np.dot(sposcar[\"lattvec\"],sposcar[\"positions\"][:,ii])*10\n f.write(\"{:>6d} {:>6d} {:>15.10f} {:>15.10f} {:>15.10f}\\n\".\n format(ii+1,ispecies[ii]+1, car[0,ii],car[1,ii],car[2,ii]))\n ffinal=open(filename,\"w\")\n ffinal.write(f.getvalue())\n f.close()\n ffinal.close()",
"def dump_tsv(file_name, data, header=None, append=False):\n mode = \"a\" if append else \"w\"\n with open(file_name, mode) as f:\n print(\"tsv file created:\", file_name)\n if header:\n f.write(\"\\t\".join(header) + \"\\n\")\n for line in data:\n f.write(\"\\t\".join([str(d) for d in line]) + \"\\n\")",
"def write_tsv(df, path):\n df.to_csv(path, sep=\"\\t\", compression=\"gzip\")",
"def writeMeshVTP(self, outFile):\n # setup colors\n Colors = vtk.vtkFloatArray()\n #Colors.SetNumberOfComponents(3)\n Colors.SetNumberOfTuples(self.Npts)\n Colors.SetName(self.label) #can change to any string\n\n #points\n vtkPts = vtk.vtkPoints()\n\n #build points and colors\n for i,facet in enumerate(self.mesh.Facets):\n for j in range(3):\n x = facet.Points[j][0]\n y = facet.Points[j][1]\n z = facet.Points[j][2]\n vtkPts.InsertNextPoint(x,y,z)\n # Colors.InsertTuple( i*3+j, (arr[i],arr[i],arr[i]) )\n Colors.InsertTuple( i*3+j, [self.scalar[i]] )\n\n #build vtp triangular mesh\n Triangles = vtk.vtkCellArray()\n for i in range(self.Npts):\n Triangle = vtk.vtkTriangle()\n Triangle.GetPointIds().SetId(0, i*3+0)\n Triangle.GetPointIds().SetId(1, i*3+1)\n Triangle.GetPointIds().SetId(2, i*3+2)\n Triangles.InsertNextCell(Triangle)\n\n #build final vtp object for writing\n polydata = vtk.vtkPolyData()\n polydata.SetPoints(vtkPts)\n polydata.SetPolys(Triangles)\n polydata.GetPointData().SetScalars(Colors)\n polydata.Modified()\n writer = vtk.vtkXMLPolyDataWriter()\n writer.SetFileName(outFile)\n writer.SetInputData(polydata)\n #writer.SetDataModeToBinary()\n writer.Write()\n\n return",
"def write_to_vtk(mesh, displacement=None, file_name=\"gridfile\"):\n cents = get_cell_centroids(mesh)\n dim = len(cents[0])\n \n if displacement is not None:\n cents+= displacement\n \n file_name = \"./\"+file_name\n \n write_function=None\n if dim==3:\n write_function = write_to_vtk3D\n if dim==2:\n write_function = write_to_vtk2D\n \n write_function(cents, displacement, file_name)\n\n\n pass",
"def write_eneheader(self,filename,replica):\n \n fheader = open(filename,'w')\n fheader.write('E_pot\\tE_rest(D)\\tD\\tcontact_state\\ttemp\\n')\n fheader.write('# Energy units: Joules/mol\\n')\n fheader.write('# Restrained contact state: ' + repr(replica.mc.restraint.contacts) + '\\n')\n fheader.write('# kspring: '+str(replica.mc.restraint.kspring) + '\\n')\n\tfheader.close()",
"def write_values_to_tsv(self, values, out_file):\n with open(out_file, \"w\") as f:\n for row in values:\n f.write(\"\\t\".join([cell.strip().replace(\"\\n\", \"|\").replace(\"\\r\", \"\")\n for cell in row]))\n f.write(os.linesep)",
"def save_pose(msg, t, text):\n text.write(\"%i.%09i\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n\" \n %( t.secs, t.nsecs,\n msg.pose.position.x, msg.pose.position.y, msg.pose.position.z,\n msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w))",
"def writeVTK(self,fname,itime=None,output_time=None):\n if output_time:\n itime = int(output_time / self.dt)\n if not itime:\n print 'Need to specify itime or output_time'\n return\n print 'Writing out time step',itime,': t=',self.t[itime]\n u = np.zeros((self.NY,1,self.NZ)); u[:,0,:] = np.flipud(self.field['u'][itime,:,:]).T\n v = np.zeros((self.NY,1,self.NZ)); v[:,0,:] = np.flipud(self.field['v'][itime,:,:]).T\n w = np.zeros((self.NY,1,self.NZ)); w[:,0,:] = np.flipud(self.field['w'][itime,:,:]).T\n VTKwriter.vtk_write_structured_points( open(fname,'wb'), #binary mode\n 1,self.NY,self.NZ,\n [u,v,w],\n datatype=['vector'],\n dx=1.0,dy=self.dy,dz=self.dz,\n dataname=['TurbSim_velocity'],\n origin=[0.,self.y[0],self.z[0]] )",
"def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile, dialect='excel-tab')\n else:\n raise ExportAborted('Export cancelled by user.')",
"def write_xyz(self, filename):\n df = self.contents[['Element', 'X', 'Y', 'Z']].copy()\n np.savetxt(filename, df.values, fmt='%s' + '%20.15f' * 3,\n header=f\"{self.numatom}\\n{self.comment}\", comments=\"\")",
"def save_tsv_file(parsed_data):\n result_file.write('\\t'.join(parsed_data) + '\\n')",
"def CSV_Write_File( self, cvsfilename ):\n self.SVF = open( cvsfilename, 'w' )\n self.SVF.write( 'Site/Plot, Age, Tree#, OrigTree#, Species, Dia, Ht, Live/Dead, Status, Condition, TPA, CR, Crad, ' )\n self.SVF.write( 'BrokenHt, BrokenOff, Bearing, DMR, LeanAngle, RootWad, X, Y\\n' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n print( s )\n ymin = 9999\n ymax = 0\n trees = self.Data.Stand[s].Tree.keys()\n for t in trees:\n years = self.Data.Stand[s].Tree[t].Year.keys()\n for y in years:\n if( y < ymin ): ymin = y\n if( y > ymax ): ymax = y\n years = range( ymin, ymax+1, 5 )\n for y in years:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n if( not self.Data.Stand[s].Tree.has_key(t) ): continue\n if( not self.Data.Stand[s].Tree[t].Year.has_key(y) ): continue\n (species, dbh, ht, tpa, treeno, live, cclass, status) = ( self.Data.Stand[s].Tree[t].Species,\n self.Data.Stand[s].Tree[t].Year[y].DBH, self.Data.Stand[s].Tree[t].Year[y].Height,\n self.Data.Stand[s].Tree[t].Year[y].TPA, self.Data.Stand[s].Tree[t].TreeNumber,\n self.Data.Stand[s].Tree[t].Year[y].Live, self.Data.Stand[s].Tree[t].Year[y].Condition,\n self.Data.Stand[s].Tree[t].Year[y].Status )\n self.SVF.write( '%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n' % \\\n (s, y, t, treeno, species, dbh, ht, live, status, cclass, tpa ) )\n self.SVF.close()",
"def write_ecsv(\n cosmology,\n file,\n *,\n overwrite=False,\n cls=QTable,\n cosmology_in_meta=True,\n rename=None,\n **kwargs\n):\n table = to_table(\n cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta, rename=rename\n )\n\n kwargs[\"format\"] = \"ascii.ecsv\"\n table.write(file, overwrite=overwrite, **kwargs)"
]
| [
"0.63908005",
"0.6292356",
"0.6181932",
"0.6171404",
"0.6037974",
"0.5988555",
"0.5952182",
"0.5941722",
"0.5917379",
"0.59172416",
"0.58794904",
"0.5877961",
"0.5865538",
"0.58002007",
"0.57907534",
"0.57813555",
"0.5768386",
"0.5759972",
"0.5754379",
"0.57495767",
"0.5714846",
"0.56857485",
"0.56734204",
"0.5670961",
"0.56539804",
"0.5653615",
"0.5637781",
"0.5636448",
"0.560948",
"0.55994374"
]
| 0.7126136 | 0 |
Export tkRAS transformation to a transform file. | def export_transform(offset, transform_file, format='slicer'):
assert format == 'slicer'
transform_file = Path(transform_file)
transform_file = transform_file.with_suffix('.tfm')
    # use ITK convention ("ITK's convention is to use LPS coordinate system as opposed to RAS coordinate system in Slicer")
offset = offset * [-1, -1, 1]
tfm = """\
#Insight Transform File V1.0
#Transform 0
Transform: AffineTransform_double_3_3
Parameters: 1 0 0 0 1 0 0 0 1 {:.3f} {:.3f} {:.3f}
FixedParameters: 0 0 0
""".format(*offset)
with transform_file.open('w') as f:
f.write(dedent(tfm)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filemenu_Export(self):\n line_dict = {}\n for line in self.lines.values():\n for name, arr in line.to_mat().items():\n line_dict[name] = arr\n fileTypes = [(\"MATLAB file\",\"*.mat\"), (\"NumPy file\",\"*.npz\")]\n options = {}\n options['initialdir'] = os.path.expanduser('~')\n options['filetypes'] = fileTypes\n options['parent'] = self.master\n filename = filedialog.asksaveasfilename(**options)\n if filename:\n _, ext = os.path.splitext(filename)\n if ext == \".mat\":\n sio.savemat(filename, line_dict)\n elif ext == \".npz\":\n np.savez(filename, lines=line_dict)",
"def saveTrans(self):\n modtranDataDir = os.getenv('MODTRAN_DATADIR')\n outputfile = '{0}/{1}_final.plt'.format(\n self.outfilename, self.outfilename)\n outputpath = os.path.join(modtranDataDir, outputfile)\n with open(outputpath, 'w') as transmf:\n transmf.write('$ FINAL ATMOSPHERE TRANSMISSION\\n')\n for val in range(len(self.modtran_wl)):\n data = '\\t'.join('{0:f}'.format(self.transmittance[run][val])\n for run in range(len(self.modtran_wl)))\n line = '{0}\\t{1}\\n'.format(self.modtran_wl[val], data)\n transmf.write(line)",
"def WriteExport(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n if (self.__currentImportProperName == None): return\r\n \r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n basename = self.__currentImportProperName + \".dae\"\r\n \r\n command = \"\"\r\n \r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__EXPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myEProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + \r\n setting.GetValue() + \"\\n\")\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n \r\n self.__script.write(\r\n \"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \"\\\"\\n\"\r\n \"set myEProp = CreateExportFTKOptions()\\n\"\r\n \"myEProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n \"\\\"\\n\" +\r\n \"myEProp.Parameters(\\\"Format\\\").Value = 1\\n\"\r\n \"myEProp.Parameters(\\\"Verbose\\\").Value = True\\n\" +\r\n command +\r\n \"ExportFTK myEProp.Name\\n\"\r\n )\r\n \r\n return [basename,]",
"def save_trans(self, fname):\n if not self.can_save:\n raise RuntimeError(\"Not enough information for saving transform\")\n trans_matrix = self.head_mri_trans\n trans = {'to': FIFF.FIFFV_COORD_MRI, 'from': FIFF.FIFFV_COORD_HEAD,\n 'trans': trans_matrix}\n write_trans(fname, trans)",
"def save_grtrans_image(grt_obj):\n I_im = grt_obj.ivals[:,0,0].reshape(npix,npix).flatten()\n Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix).flatten()\n U_im = grt_obj.ivals[:,2,0].reshape(npix,npix).flatten()\n V_im = grt_obj.ivals[:,3,0].reshape(npix,npix).flatten()\n\n # convert to Tb\n factor = 3.254e13/(RF**2 * psize_rad**2)\n I_im *= factor\n Q_im *= factor\n U_im *= factor\n V_im *= factor\n\n x = np.array([[i for i in range(npix)] for j in range(npix)]).flatten()\n y = np.array([[j for i in range(npix)] for j in range(npix)]).flatten()\n\n x -= npix/2\n y -= npix/2\n x = x*psize_uas\n y = y*psize_uas\n\n outdat = np.vstack((x.T,y.T,I_im.T,Q_im.T,U_im.T,V_im.T)).T\n np.savetxt('../rrjet_and_riaf/'+FNAME,outdat)\n #np.savetxt('../rrjet_and_riaf/grtrans_jet_compare_positron_noconv.txt',outdat)\n return",
"def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')",
"def export_tikz(nodes, scale, path):\n filename = asksaveasfile(defaultextension=\".tex\")\n if filename:\n _file = open(filename.name, 'w')\n\n _file.write(\"\\\\begin{tikzpicture}\\n\")\n _file.write(\"\\\\begin{axis}[%\\n\")\n _file.write(\"width=\\\\textwidth,\\n\")\n _file.write(\"scale only axis,\\n\")\n _file.write(\"xmin=-100,\\n\")\n _file.write(\"xmax=2700,\\n\")\n _file.write(\"ymin=-100,\\n\")\n _file.write(\"ymax=2100,\\n\")\n _file.write(\"y dir=reverse,\\n\")\n _file.write(\"axis x line*=bottom,\\n\")\n _file.write(\"axis y line*=left\\n\")\n _file.write(\"]\\n\")\n\n for group in get_groups(nodes):\n _file.write(\n \"\"\"\\\\addplot [color=black,mark size=5.0pt,\n only marks,mark=*,mark options={solid,\n fill=\"\"\" + group.lower() + \"},forget plot]\\n\")\n _file.write(\"table[row sep=crcr]{%\\n\")\n for node in nodes:\n if node.color == group:\n _file.write(\n str(node.x_coord * scale) + \" \" +\n str(node.y_coord * scale) + \"\\\\\\\\\\n\")\n _file.write(\"};\\n\")\n\n if not path is None:\n _file.write(\"\\\\addplot [draw=black,forget plot]\\n\")\n _file.write(\"table[row sep=crcr]{%\\n\")\n for path_node in path['Tour']:\n print(path_node)\n node = nodes[int(path_node)]\n print(node)\n _file.write(\n str(node.x_coord * scale) + \" \" +\n str(node.y_coord * scale) + \"\\\\\\\\\\n\")\n _file.write(\"};\\n\")\n _file.write(\"\\\\end{axis}\\n\")\n _file.write(\"\\\\end{tikzpicture}%\\n\")\n _file.close()",
"def ortra_export(request):\n export_fields = OrderedDict(ORTRA_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(Q(klass__name__contains='ASAFE') |\n Q(klass__name__contains='ASEFE') |\n Q(klass__name__contains='ASSCFE'),\n archived=False).order_by('klass__name',\n 'last_name',\n 'first_name')\n\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('ortra_export')",
"def make_file(self):\n\n f = open(get_output_path(), \"w\")\n \n f.write(self.export())\n \n f.close()\n\n return self",
"def export_tecan(args):\n clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)",
"def export(self):\n if self.model.algorithm == 'DecisionTree':\n dot_data = tree.export_graphviz(self.model.clf, out_file=None)\n graph = graphviz.Source(dot_data)\n graph.render(\"exports/DecisionTreeRegressor\")",
"def write_transform(transform, filename):\n filename = os.path.expanduser(filename)\n write_transform_fn = _write_transform_dict[transform.precision][transform.dimension]\n write_transform_fn(transform._tx, filename)",
"def click_re_analysis_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.re_analysis_grid_div_id)",
"def export_samfile(self):",
"def convert(filename,\nRenderer: \"\"\"By default, the schematic is converted to an SVG file,\n written to the standard output. It may also be rendered using TK.\"\"\",\n):\n \n with open(filename, \"rb\") as file:\n objects = read(file)\n stat = os.stat(file.fileno())\n \n sheet = objects[1]\n assert sheet[\"RECORD\"] == Record.SHEET\n (sheetstyle, size) = {SheetStyle.A4: (\"A4\", (1150, 760)), SheetStyle.A3: (\"A3\", (1550, 1150)), SheetStyle.A: (\"A\", (950, 760))}[sheet.get(\"SHEETSTYLE\", SheetStyle.A4)]\n if \"USECUSTOMSHEET\" in sheet:\n size = tuple(int(sheet[\"CUSTOM\" + \"XY\"[x]]) for x in range(2))\n \n # Units are 1/100\" or 10 mils\n renderer = Renderer(size, \"in\", 1/100,\n margin=0.3, line=1, down=-1, textbottom=True)\n \n for n in range(int(sheet[\"FONTIDCOUNT\"])):\n n = format(1 + n)\n fontsize = int(sheet[\"SIZE\" + n]) * 0.875\n family = sheet[\"FONTNAME\" + n].decode(\"ascii\")\n kw = dict()\n italic = sheet.get(\"ITALIC\" + n)\n if italic:\n kw.update(italic=True)\n bold = sheet.get(\"BOLD\" + n)\n if bold:\n kw.update(bold=True)\n renderer.addfont(\"font\" + n, fontsize, family, **kw)\n renderer.setdefaultfont(\"font\" + sheet[\"SYSTEMFONT\"].decode(\"ascii\"))\n renderer.start()\n \n arrowhead = dict(base=5, shoulder=7, radius=3)\n arrowtail = dict(base=7, shoulder=0, radius=2.5)\n diamond = dict(base=10, shoulder=5, radius=2.5)\n \n pinmarkers = {\n PinElectrical.INPUT: arrowhead,\n PinElectrical.IO: diamond,\n PinElectrical.OUTPUT: arrowtail,\n PinElectrical.PASSIVE: None,\n PinElectrical.POWER: None,\n }\n \n def gnd(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n renderer.vline(-4, +4, offset=(13, 0), width=1.5)\n renderer.vline(-1, +1, offset=(16, 0), width=1.5)\n def rail(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n def arrowconn(renderer):\n renderer.hline(10, endarrow=arrowhead)\n def dchevron(renderer):\n renderer.hline(5)\n renderer.polyline(((8, +4), (5, 0), (8, -4)))\n renderer.polyline(((11, +4), (8, 0), (11, -4)))\n connmarkers = {\n PowerObjectStyle.ARROW: (arrowconn, 12),\n PowerObjectStyle.BAR: (rail, 12),\n PowerObjectStyle.GND: (gnd, 20),\n }\n \n def nc(renderer):\n renderer.line((+3, +3), (-3, -3), width=0.6)\n renderer.line((-3, +3), (+3, -3), width=0.6)\n renderer.addobjects((gnd, rail, arrowconn, dchevron, nc))\n \n with renderer.view(offset=(0, size[1])) as base:\n base.rectangle((size[0], -size[1]), width=0.6)\n base.rectangle((20, -20), (size[0] - 20, 20 - size[1]), width=0.6)\n for axis in range(2):\n for side in range(2):\n for n in range(4):\n translate = [None] * 2\n translate[axis] = size[axis] / 4 * (n + 0.5)\n translate[axis ^ 1] = 10\n if side:\n translate[axis ^ 1] += size[axis ^ 1] - 20\n translate[1] *= -1\n with base.view(offset=translate) as ref:\n label = chr(ord(\"1A\"[axis]) + n)\n ref.text(label, horiz=ref.CENTRE, vert=ref.CENTRE)\n if n + 1 < 4:\n x = size[axis] / 4 / 2\n if axis:\n ref.hline(-10, +10, offset=(0, -x),\n width=0.6)\n else:\n ref.vline(-10, +10, offset=(x, 0), width=0.6)\n \n if \"TITLEBLOCKON\" in sheet:\n if not os.path.isabs(filename):\n cwd = os.getcwd()\n pwd = os.getenv(\"PWD\")\n if os.path.samefile(pwd, cwd):\n cwd = pwd\n filename = os.path.join(pwd, filename)\n with base.view(offset=(size[0] - 20, 20 - size[1])) as block:\n points = ((-350, 0), (-350, 80), (-0, 80))\n block.polyline(points, width=0.6)\n block.hline(-350, 0, offset=(0, 50), width=0.6)\n block.vline(-30, offset=(-300, 50), width=0.6)\n block.vline(-30, 
offset=(-100, 50), width=0.6)\n block.hline(-350, 0, offset=(0, 20), width=0.6)\n block.hline(-350, 0, offset=(0, 10), width=0.6)\n block.vline(20, 0, offset=(-150, 0), width=0.6)\n \n block.text(\"Title\", (-345, 70))\n block.text(\"Size\", (-345, 40))\n block.text(sheetstyle, (-340, 30), vert=block.CENTRE)\n block.text(\"Number\", (-295, 40))\n block.text(\"Revision\", (-95, 40))\n block.text(\"Date\", (-345, 10))\n d = format(date.fromtimestamp(stat.st_mtime), \"%x\")\n block.text(d, (-300, 10))\n block.text(\"File\", (-345, 0))\n block.text(filename, (-300, 0))\n block.text(\"Sheet\", (-145, 10))\n block.text(\"of\", (-117, 10))\n block.text(\"Drawn By:\", (-145, 0))\n \n for obj in objects:\n if (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\"} and\n obj[\"RECORD\"] == Record.JUNCTION and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n col = colour(obj[\"COLOR\"])\n renderer.circle(2, location, fill=col)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"IOTYPE\", \"ALIGNMENT\"} == {\"RECORD\", \"OWNERPARTID\", \"STYLE\", \"WIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"AREACOLOR\", \"TEXTCOLOR\", \"NAME\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.PORT and obj[\"OWNERPARTID\"] == b\"-1\"):\n width = int(obj[\"WIDTH\"])\n if \"IOTYPE\" in obj:\n points = ((0, 0), (5, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (5, +5))\n else:\n points = ((0, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (0, +5))\n if (obj.get(\"ALIGNMENT\") == b\"2\") ^ (obj[\"STYLE\"] != b\"7\"):\n labelpoint = (10, 0)\n horiz = renderer.LEFT\n else:\n labelpoint = (width - 10, 0)\n horiz = renderer.RIGHT\n if obj[\"STYLE\"] == b\"7\":\n shapekw = dict(rotate=+90, offset=(0, +width))\n else:\n shapekw = dict()\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(offset=offset) as view:\n view.polygon(points,\n width=0.6,\n outline=colour(obj[\"COLOR\"]),\n fill=colour(obj[\"AREACOLOR\"]),\n **shapekw)\n \n with contextlib.ExitStack() as context:\n if obj[\"STYLE\"] == b\"7\":\n view = context.enter_context(view.view(rotate=+1))\n view.text(\n overline(obj[\"NAME\"]),\n colour=colour(obj[\"TEXTCOLOR\"]),\n offset=labelpoint,\n vert=view.CENTRE, horiz=horiz,\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} >= {\"RECORD\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\"} and\n obj[\"RECORD\"] == Record.WIRE and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"LINEWIDTH\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polyline(points, colour=colour(obj[\"COLOR\"]))\n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] in {b\"46\", b\"48\", b\"44\"} or\n obj.keys() - {\"USECOMPONENTLIBRARY\", \"DESCRIPTION\", \"DATAFILECOUNT\", \"MODELDATAFILEENTITY0\", \"MODELDATAFILEKIND0\", \"DATALINKSLOCKED\", \"DATABASEDATALINKSLOCKED\", \"ISCURRENT\", \"INDEXINSHEET\", \"INTEGRATEDMODEL\", \"DATABASEMODEL\"} == {\"RECORD\", \"OWNERINDEX\", \"MODELNAME\", \"MODELTYPE\"} and\n obj[\"RECORD\"] == b\"45\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj.get(\"USECOMPONENTLIBRARY\", b\"T\") == b\"T\" and obj[\"MODELTYPE\"] in {b\"PCBLIB\", b\"SI\", b\"SIM\", b\"PCB3DLib\"} and obj.get(\"DATAFILECOUNT\", b\"1\") == b\"1\" and 
obj.get(\"ISCURRENT\", b\"T\") == b\"T\" and obj.get(\"INTEGRATEDMODEL\", b\"T\") == b\"T\" and obj.get(\"DATABASEMODEL\", b\"T\") == b\"T\" and obj.get(\"DATALINKSLOCKED\", b\"T\") == b\"T\" and obj.get(\"DATABASEDATALINKSLOCKED\", b\"T\") == b\"T\" or\n obj.keys() >= {\"RECORD\", \"AREACOLOR\", \"BORDERON\", \"CUSTOMX\", \"CUSTOMY\", \"DISPLAY_UNIT\", \"FONTIDCOUNT\", \"FONTNAME1\", \"HOTSPOTGRIDON\", \"HOTSPOTGRIDSIZE\", \"ISBOC\", \"SHEETNUMBERSPACESIZE\", \"SIZE1\", \"SNAPGRIDON\", \"SNAPGRIDSIZE\", \"SYSTEMFONT\", \"USEMBCS\", \"VISIBLEGRIDON\", \"VISIBLEGRIDSIZE\"} and\n obj[\"RECORD\"] == Record.SHEET and obj[\"AREACOLOR\"] == b\"16317695\" and obj[\"BORDERON\"] == b\"T\" and obj.get(\"CUSTOMMARGINWIDTH\", b\"20\") == b\"20\" and obj.get(\"CUSTOMXZONES\", b\"6\") == b\"6\" and obj.get(\"CUSTOMYZONES\", b\"4\") == b\"4\" and obj[\"DISPLAY_UNIT\"] == b\"4\" and obj[\"FONTNAME1\"] == b\"Times New Roman\" and obj[\"HOTSPOTGRIDON\"] == b\"T\" and obj[\"ISBOC\"] == b\"T\" and obj[\"SHEETNUMBERSPACESIZE\"] == b\"4\" and obj[\"SIZE1\"] == b\"10\" and obj[\"SNAPGRIDON\"] == b\"T\" and obj[\"SYSTEMFONT\"] == b\"1\" and obj.get(\"TITLEBLOCKON\", b\"T\") == b\"T\" and obj[\"USEMBCS\"] == b\"T\" and obj[\"VISIBLEGRIDON\"] == b\"T\" and obj[\"VISIBLEGRIDSIZE\"] == b\"10\" or\n obj.keys() == {\"HEADER\", \"WEIGHT\"} and\n obj[\"HEADER\"] == b\"Protel for Windows - Schematic Capture Binary File Version 5.0\" or\n obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"DESIMP0\", \"DESIMPCOUNT\", \"DESINTF\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] == b\"47\" and obj[\"DESIMPCOUNT\"] == b\"1\" or\n obj.keys() == {\"RECORD\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"FILENAME\"} and\n obj[\"RECORD\"] == b\"39\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n pass\n \n elif (obj.keys() - {\"ISMIRRORED\", \"ORIENTATION\", \"INDEXINSHEET\", \"COMPONENTDESCRIPTION\", \"SHEETPARTFILENAME\", \"DESIGNITEMID\", \"DISPLAYMODE\", \"NOTUSEDBTABLENAME\", \"LIBRARYPATH\"} == {\"RECORD\", \"OWNERPARTID\", \"UNIQUEID\", \"AREACOLOR\", \"COLOR\", \"CURRENTPARTID\", \"DISPLAYMODECOUNT\", \"LIBREFERENCE\", \"LOCATION.X\", \"LOCATION.Y\", \"PARTCOUNT\", \"PARTIDLOCKED\", \"SOURCELIBRARYNAME\", \"TARGETFILENAME\"} and\n obj[\"RECORD\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"AREACOLOR\"] == b\"11599871\" and obj[\"COLOR\"] == b\"128\" and obj[\"PARTIDLOCKED\"] == b\"F\" and obj[\"TARGETFILENAME\"] == b\"*\"):\n pass\n \n elif (obj.keys() - {\"TEXT\", \"OWNERINDEX\", \"ISHIDDEN\", \"READONLYSTATE\", \"INDEXINSHEET\", \"UNIQUEID\", \"LOCATION.X\", \"LOCATION.X_FRAC\", \"LOCATION.Y\", \"LOCATION.Y_FRAC\", \"ORIENTATION\", \"ISMIRRORED\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"NAME\"} and\n obj[\"RECORD\"] == Record.PARAMETER and obj[\"OWNERPARTID\"] == b\"-1\"):\n if obj.get(\"ISHIDDEN\") != b\"T\" and obj.keys() >= {\"TEXT\", \"LOCATION.X\", \"LOCATION.Y\"}:\n orient = obj.get(\"ORIENTATION\")\n kw = {\n None: dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"1\": dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"2\": dict(vert=renderer.TOP, horiz=renderer.RIGHT),\n }[orient]\n if orient == b\"1\":\n kw.update(angle=+90)\n val = obj[\"TEXT\"]\n if val.startswith(b\"=\"):\n match = val[1:].lower()\n for o in objects:\n if o.get(\"RECORD\") != Record.PARAMETER or o.get(\"OWNERINDEX\") != obj[\"OWNERINDEX\"]:\n continue\n if o[\"NAME\"].lower() != match:\n continue\n val = o[\"TEXT\"]\n break\n else:\n raise LookupError(\"Parameter value for 
|OWNERINDEX={}|TEXT={}\".format(obj[\"OWNERINDEX\"].decode(\"ascii\"), obj[\"TEXT\"].decode(\"ascii\")))\n renderer.text(val.decode(\"ascii\"),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n **kw)\n else:\n text(renderer, obj, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ISMIRRORED\", \"LOCATION.X_FRAC\", \"LOCATION.Y_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\", \"NAME\", \"READONLYSTATE\"} and\n obj[\"RECORD\"] == Record.DESIGNATOR and obj[\"OWNERPARTID\"] == b\"-1\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"NAME\"] == b\"Designator\" and obj[\"READONLYSTATE\"] == b\"1\"):\n desig = obj[\"TEXT\"].decode(\"ascii\")\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if int(owner[\"PARTCOUNT\"]) > 2:\n desig += chr(ord(\"A\") + int(owner[\"CURRENTPARTID\"]) - 1)\n renderer.text(desig, (int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n colour=colour(obj[\"COLOR\"]),\n font=\"font\" + obj[\"FONTID\"].decode(),\n )\n \n elif (obj.keys() >= {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"LOCATIONCOUNT\", \"X1\", \"X2\", \"Y1\", \"Y2\"} and\n obj[\"RECORD\"] == Record.POLYLINE and obj.get(\"ISNOTACCESIBLE\", b\"T\") == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\"):\n if obj[\"OWNERPARTID\"] == b\"-1\":\n current = True\n else:\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n current = (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\"))\n if current:\n polyline(renderer, obj)\n \n elif (obj.keys() - {\"OWNERPARTDISPLAYMODE\", \"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"COLOR\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\"} and\n obj[\"RECORD\"] == Record.LINE and obj[\"ISNOTACCESIBLE\"] == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n renderer.line(\n colour=colour(obj[\"COLOR\"]),\n width=int(obj[\"LINEWIDTH\"]),\n a=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n b=(int(obj[\"CORNER.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"NAME\", \"SWAPIDPIN\", \"OWNERPARTDISPLAYMODE\", \"ELECTRICAL\", \"DESCRIPTION\", \"SWAPIDPART\", \"SYMBOL_OUTEREDGE\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"DESIGNATOR\", \"FORMALTYPE\", \"LOCATION.X\", \"LOCATION.Y\", \"PINCONGLOMERATE\", \"PINLENGTH\"} and\n obj[\"RECORD\"] == Record.PIN and obj[\"FORMALTYPE\"] == b\"1\"):\n if obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n pinlength = int(obj[\"PINLENGTH\"])\n pinconglomerate = int(obj[\"PINCONGLOMERATE\"])\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n rotate = pinconglomerate & 3\n with renderer.view(offset=offset, rotate=rotate) as view:\n kw = dict()\n points = list()\n if \"SYMBOL_OUTEREDGE\" in obj:\n view.circle(2.85, (3.15, 0), width=0.6)\n points.append(6)\n points.append(pinlength)\n electrical = obj.get(\"ELECTRICAL\", PinElectrical.INPUT)\n marker = pinmarkers[electrical]\n if marker:\n kw.update(startarrow=marker)\n view.hline(*points, **kw)\n \n if pinconglomerate >> 1 & 1:\n invert = -1\n kw = dict(angle=180)\n else:\n invert = +1\n kw = dict()\n if pinconglomerate & 8 and \"NAME\" in obj:\n 
view.text(overline(obj[\"NAME\"]),\n vert=view.CENTRE,\n horiz=view.RIGHT * invert,\n offset=(-7, 0),\n **kw)\n if pinconglomerate & 16:\n designator = obj[\"DESIGNATOR\"].decode(\"ascii\")\n view.text(designator,\n horiz=view.LEFT * invert,\n offset=(+9, 0),\n **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ORIENTATION\", \"STYLE\", \"ISCROSSSHEETCONNECTOR\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"SHOWNETNAME\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.POWER_OBJECT and obj[\"OWNERPARTID\"] == b\"-1\"):\n orient = obj.get(\"ORIENTATION\")\n if obj.get(\"ISCROSSSHEETCONNECTOR\") == b\"T\":\n marker = dchevron\n offset = 14\n else:\n (marker, offset) = connmarkers.get(obj[\"STYLE\"], (None, 0))\n \n col = colour(obj[\"COLOR\"])\n translate = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(colour=col, offset=translate) as view:\n kw = dict()\n if orient:\n kw.update(rotate=int(orient))\n view.draw(marker, **kw)\n \n if obj[\"SHOWNETNAME\"] != b\"F\":\n orients = {\n b\"2\": (renderer.RIGHT, renderer.CENTRE, (-1, 0)),\n b\"3\": (renderer.CENTRE, renderer.TOP, (0, -1)),\n None: (renderer.LEFT, renderer.CENTRE, (+1, 0)),\n b\"1\": (renderer.CENTRE, renderer.BOTTOM, (0, +1)),\n }\n (horiz, vert, pos) = orients[orient]\n t = obj[\"TEXT\"].decode(\"ascii\")\n pos = (p * offset for p in pos)\n view.text(t, pos, horiz=horiz, vert=vert)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"ISSOLID\", \"LINEWIDTH\", \"CORNERXRADIUS\", \"CORNERYRADIUS\", \"TRANSPARENT\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"AREACOLOR\", \"COLOR\", \"CORNER.X\", \"CORNER.Y\", \"ISNOTACCESIBLE\", \"LOCATION.X\", \"LOCATION.Y\"} and\n obj[\"RECORD\"] in {Record.RECTANGLE, Record.ROUND_RECTANGLE} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"ISSOLID\", b\"T\") == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n kw = dict(width=0.6, outline=colour(obj[\"COLOR\"]))\n if \"ISSOLID\" in obj:\n kw.update(fill=colour(obj[\"AREACOLOR\"]))\n a = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n b = (int(obj[\"CORNER.\" + x]) for x in \"XY\")\n \n if obj[\"RECORD\"] == Record.ROUND_RECTANGLE:\n r = list()\n for x in \"XY\":\n radius = obj.get(\"CORNER{}RADIUS\".format(x))\n if radius is None:\n radius = 0\n else:\n radius = int(radius)\n r.append(int(radius))\n renderer.roundrect(r, a, b, **kw)\n else:\n renderer.rectangle(a, b, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.NET_LABEL and obj[\"OWNERPARTID\"] == b\"-1\"):\n renderer.text(overline(obj[\"TEXT\"]),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"STARTANGLE\", \"SECONDARYRADIUS\"} == {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"COLOR\", \"ENDANGLE\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\"} and\n obj[\"RECORD\"] in {Record.ARC, Record.ELLIPTICAL_ARC} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"LINEWIDTH\"] == b\"1\" and obj.get(\"OWNERPARTDISPLAYMODE\", b\"1\") == b\"1\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (owner[\"CURRENTPARTID\"] == obj[\"OWNERPARTID\"] and\n 
owner.get(\"DISPLAYMODE\", b\"0\") == obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\")):\n r = int(obj[\"RADIUS\"])\n if obj[\"RECORD\"] == Record.ELLIPTICAL_ARC:\n r2 = obj.get(\"SECONDARYRADIUS\")\n if r2 is None:\n r2 = 0\n else:\n r2 = int(r2)\n else:\n r2 = r\n \n start = float(obj.get(\"STARTANGLE\", 0))\n end = float(obj[\"ENDANGLE\"])\n centre = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.arc((r, r2), start, end, centre,\n colour=colour(obj[\"COLOR\"]),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"LINEWIDTH\"} > {\"RECORD\", \"AREACOLOR\", \"COLOR\", \"ISNOTACCESIBLE\", \"ISSOLID\", \"LOCATIONCOUNT\", \"OWNERINDEX\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == Record.POLYGON and obj[\"AREACOLOR\"] == b\"16711680\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\" and obj[\"OWNERPARTID\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polygon(fill=colour(obj[\"COLOR\"]), points=points)\n elif (obj.keys() - {\"INDEXINSHEET\", \"ISNOTACCESIBLE\", \"OWNERINDEX\", \"ORIENTATION\", \"JUSTIFICATION\", \"COLOR\"} == {\"RECORD\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.LABEL):\n if obj[\"OWNERPARTID\"] == b\"-1\" or obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n text(renderer, obj)\n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == b\"22\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n col = colour(obj[\"COLOR\"])\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.draw(nc, location, colour=col)\n elif (obj.keys() - {\"CLIPTORECT\"} == {\"RECORD\", \"ALIGNMENT\", \"AREACOLOR\", \"CORNER.X\", \"CORNER.Y\", \"FONTID\", \"ISSOLID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"Text\", \"WORDWRAP\"} and\n obj[\"RECORD\"] == b\"28\" and obj[\"ALIGNMENT\"] == b\"1\" and obj[\"AREACOLOR\"] == b\"16777215\" and obj.get(\"CLIPTORECT\", b\"T\") == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"WORDWRAP\"] == b\"T\"):\n lhs = int(obj[\"LOCATION.X\"])\n renderer.text(\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n offset=(lhs, int(obj[\"CORNER.Y\"])),\n width=int(obj[\"CORNER.X\"]) - lhs,\n text=obj[\"Text\"].decode(\"ascii\").replace(\"~1\", \"\\n\"),\n vert=renderer.TOP,\n )\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\", \"X3\", \"Y3\", \"X4\", \"Y4\"} and\n obj[\"RECORD\"] == Record.BEZIER and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"1\" and obj[\"LINEWIDTH\"] == b\"1\" and obj[\"LOCATIONCOUNT\"] == b\"4\"):\n col = colour(obj[\"COLOR\"])\n points = list()\n for n in range(4):\n n = format(1 + n)\n points.append(tuple(int(obj[x + n]) for x in \"XY\"))\n renderer.cubicbezier(*points, colour=col)\n \n elif (obj.keys() - {\"RADIUS_FRAC\", \"SECONDARYRADIUS_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\", \"SECONDARYRADIUS\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\"} and\n obj[\"RECORD\"] == Record.ELLIPSE and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"RADIUS_FRAC\", b\"94381\") == b\"94381\" and obj[\"SECONDARYRADIUS\"] == 
obj[\"RADIUS\"] and obj.get(\"SECONDARYRADIUS_FRAC\", b\"22993\") == b\"22993\" and obj[\"ISSOLID\"] == b\"T\"):\n renderer.circle(\n r=int(obj[\"RADIUS\"]),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"SYMBOLTYPE\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"XSIZE\", \"YSIZE\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.SHEET_SYMBOL and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"SYMBOLTYPE\", b\"Normal\") == b\"Normal\"):\n renderer.rectangle((int(obj[\"XSIZE\"]), -int(obj[\"YSIZE\"])),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\"} and\n obj[\"RECORD\"] in {Record.SHEET_NAME, Record.SHEET_FILE_NAME} and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n text(renderer, obj)\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"INDEXINSHEET\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\", \"EMBEDIMAGE\", \"FILENAME\"} and\n obj[\"RECORD\"] == Record.IMAGE and obj[\"OWNERINDEX\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"EMBEDIMAGE\"] == b\"T\" and obj[\"FILENAME\"] == b\"newAltmLogo.bmp\"):\n location = list()\n corner = list()\n for x in \"XY\":\n location.append(int(obj[\"LOCATION.\" + x]))\n corner.append(int(obj[\"CORNER.\" + x]))\n renderer.rectangle(location, corner, width=0.6)\n \n else:\n print(\"\".join(\"|{}={!r}\".format(p, v) for (p, v) in sorted(obj.items())), file=stderr)\n \n renderer.finish()",
"def __call__(self):\n self.tree = etree.parse(self.src)\n\n agent = transformer_factory(self.tree, self.options)\n self.tree = agent.transform()\n\n # Write out the finished product\n file = self._targetFile()\n self.tree.write(file, pretty_print=False)\n print 'wrote transformed channel:', file.name",
"def _cmd_export_jtv(args):\n sample_ids = list(map(core.fbase, args.filenames))\n table = export.merge_samples(args.filenames)\n formatter = export.EXPORT_FORMATS[\"jtv\"]\n outheader, outrows = formatter(sample_ids, table)\n write_tsv(args.output, outrows, colnames=outheader)",
"def convert(root, output, license, workflow_name, readme):\n if not output:\n output = Path(f\"{root.name}.crate.zip\")\n builder = ProvCrateBuilder(root, workflow_name, license, readme)\n crate = builder.build()\n if output.suffix == \".zip\":\n crate.write_zip(output)\n else:\n crate.write(output)",
"def astrometry_script(filename, catalog=\"PS\", rotation_scaling=True, xy_transformation=True, fine_transformation=True, images=False, vignette=3,vignette_rectangular=1., cutouts=None, ra=None, dec=None, projection_ra=None, projection_dec=None, verbose=False, save_images=False, ignore_header_rot=False, radius=-1., save_bad_result=False, silent=False, sigma_threshold_for_source_detection=5, high_res = False, hdul_idx=0, filename_for_sources=None, FWHM=4):\n #print(\"Program version: 1.2\")\n\n report = {}\n if(images):\n plt.ioff()\n warnings.simplefilter('ignore', UserWarning)\n fits_image_filename = filename\n\n print(\"> Astrometry for {} \".format(fits_image_filename))\n\n with fits.open(fits_image_filename) as hdul:\n #print(hdul.info())\n #print(hdul[0].header)\n\n hdu = hdul[hdul_idx]\n #hdu.verify('fix')\n hdr = hdu.header\n\n\n image_or = hdul[hdul_idx].data.astype(float)\n median = np.nanmedian(image_or)\n image_or[np.isnan(image_or)]=median\n image = image_or - median\n\n observation = find_sources(image, vignette,vignette_rectangular,cutouts, sigma_threshold_for_source_detection, FWHM=FWHM)\n #print(observation)\n\n #changed order of positions to [(x,y), (x,y),...] for compatibility with photutils 1.4\n xcenters = np.array(observation['xcenter'])\n ycenters = np.array(observation['ycenter'])\n positions = [(xcenters[i], ycenters[i]) for i in range(len(xcenters))]\n apertures = CircularAperture(positions, r=4.)\n\n\n #world coordinates\n if(not silent):\n print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n print(WCS(hdr))\n\n hdr[\"NAXIS1\"] = image.shape[0]\n hdr[\"NAXIS2\"] = image.shape[1]\n\n #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n wcsprm = WCS(hdr).wcs\n wcsprm_original = WCS(hdr).wcs\n wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, ra, dec,projection_ra, projection_dec, ignore_header_rot, radius)\n if(verbose):\n print(WCS(wcsprm.to_header()))\n coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n if(not PIXSCALE_UNCLEAR):\n if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n if(not silent):\n print(\"central value outside of the image, moving it to the center\")\n coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n #print(wcsprm)\n\n\n\n #better: put in nice wrapper! with repeated tries and maybe try synchron!\n if(not silent):\n print(\">Dowloading catalog data\")\n radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n catalog_data = query.get_data(coord, radius, catalog)\n report[\"catalog\"] = catalog\n #reference = reference.query(\"mag <20\")\n \n\n if(catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n if(not silent):\n print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n catalog_data2 = query.get_data(coord, radius, \"PS\")\n report[\"catalog\"] = \"PS\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n elif(catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n if(not silent):\n print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n report[\"catalog\"] = \"GAIA\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n\n max_sources = 400\n if(INCREASE_FOV_FLAG):\n max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n if(catalog_data.shape[0]>max_sources):\n catalog_data = catalog_data.nsmallest(400, \"mag\")\n #remove duplicates in catalog?\n\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Input for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n\n plt.xlim(-200,image.shape[0]+200)\n plt.ylim(-200,image.shape[1]+200)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_before.pdf\")\n\n ###tranforming to match the sources\n if(not silent):\n print(\"---------------------------------\")\n print(\">Finding the transformation\")\n if(rotation_scaling):\n if(not silent):\n print(\"Finding scaling and rotation\")\n wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=verbose)\n if(xy_transformation):\n if(not silent):\n print(\"Finding offset\")\n wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= verbose, silent=silent)\n\n #correct subpixel error\n compare_threshold = 3\n if(high_res):\n compare_threshold = 100\n obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=compare_threshold)#3\n if (len(distances) == 0): #meaning the list is empty\n best_score = 0\n else:\n rms = np.sqrt(np.mean(np.square(distances)))\n best_score = len(obs_x)/(rms+10) #start with current best score\n fine_transformation_success = False\n if(fine_transformation):\n print(\"Finding scaling and rotation\")\n lis = [2,3,5,8,10,6,4, 20,2,1,0.5]\n if(high_res):\n lis = [200,300,100,150,80,40,70, 20, 100, 30,9,5]\n skip_rot_scale = True\n for i in lis:\n wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i, compare_threshold=compare_threshold, skip_rot_scale=skip_rot_scale)\n if(i == 20):\n #only allow rot and scaling for the last few tries\n skip_rot_scale = False\n if(score> best_score):\n wcsprm = wcsprm_new\n best_score = score\n fine_transformation_success = True\n if not fine_transformation_success:\n if(not silent):\n print(\"Fine transformation did not improve result so will be 
discarded.\")\n else:\n if(not silent):\n print(\"Fine transformation applied to improve result\")\n #register.calculate_rms(observation, catalog_data,wcs)\n\n #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n wcs =WCS(wcsprm.to_header())\n if(verbose):\n print(wcs)\n from astropy.wcs import utils\n scales = utils.proj_plane_pixel_scales(wcs)\n #print(scales)\n cdelt = wcsprm.get_cdelt()\n #print(cdelt)\n scale_ratio = scales/cdelt\n #print(scale_ratio)\n pc = np.array(wcsprm.get_pc())\n pc[0,0] = pc[0,0]/scale_ratio[0]\n pc[1,0] = pc[1,0]/scale_ratio[1]\n pc[0,1] = pc[0,1]/scale_ratio[0]\n pc[1,1] = pc[1,1]/scale_ratio[1]\n wcsprm.pc = pc\n wcsprm.cdelt = scales\n\n #WCS difference before and after\n if(not silent):\n print(\"> Compared to the input the Wcs was changed by: \")\n scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n if(not silent):\n print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n #sources:\n #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n def unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. \"\"\"\n return vector / max(np.linalg.norm(vector), 1e-10)\n def matrix_angle( B, A ):\n \"\"\" comment cos between vectors or matrices \"\"\"\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n #bugfix: multiplying by cdelt otherwise the calculated angle is off by a tiny bit\n rotation_angle = matrix_angle(wcsprm.get_pc()@wcsprm.get_cdelt(), wcsprm_original.get_pc()@wcsprm_original.get_cdelt()) /2./np.pi*360.\n if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n text = \"counterclockwise\"\n else:\n text = \"clockwise\"\n if(not silent):\n print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n if(not silent):\n print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n\n\n #check final figure\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Result for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_after.pdf\")\n if(not silent):\n print(\"--- Evaluate how good the transformation is ----\")\n dic_rms = register.calculate_rms(observation, catalog_data,wcsprm)\n #updating file\n converged = determine_if_fit_converged(dic_rms, catalog_data, observation, wcsprm, image.shape[0], 
image.shape[1], silent)\n report[\"converged\"] = converged\n report[\"matches\"] = dic_rms[\"matches\"]\n report[\"match_radius\"] = dic_rms[\"radius_px\"]\n if(converged or save_bad_result):\n write_wcs_to_hdr(fits_image_filename, wcsprm, report, hdul_idx=hdul_idx)\n if(filename_for_sources != None):\n wcs =WCS(wcsprm.to_header())\n observation_on_sky = wcs.wcs_pix2world(observation[[\"xcenter\",\"ycenter\"]], 1)\n #catalog_from_obs = np.zeros(observation_on_sky.shape[0], dtype={'names':('ra', 'dec', 'aperture_sum'),'formats':('f8', 'f8', 'f8')})\n catalog_from_obs = pd.DataFrame()\n catalog_from_obs[\"ra\"]= observation_on_sky[:,0]\n catalog_from_obs[\"dec\"]= observation_on_sky[:,1]\n catalog_from_obs[\"aperture_sum\"]= observation[\"aperture_sum\"]\n catalog_from_obs[\"mag\"]= -1.* observation[\"aperture_sum\"]#this is fine since we only use the mag to order the sources!\n catalog_from_obs.to_csv(filename_for_sources+\".csv\")\n if(images):\n plt.show()\n\n return converged, dic_rms #dictionary with short info about fit, \"matches\" gives a number of objects matched within certain radius",
"def export(self):\n def get_export_cmd(svgfile, fmt, dpi, outfile):\n if _use_rsvg and os.name == 'posix':\n # A DPI of 72 must be set to convert from files generated with\n # Inkscape v1+ to get the correct page size.\n ret = os.system('rsvg-convert --version 1>/dev/null')\n if ret == 0:\n return ('rsvg-convert' +\n ' --dpi-x=' + str(dpi * 72.0 / 96.0) +\n ' --dpi-y=' + str(dpi * 72.0 / 96.0) +\n ' --format=' + fmt +\n ' --output=\"' + outfile + '\"' +\n ' \"' + svgfile + '\"')\n else:\n return ('inkscape '\n + '--export-dpi=' + str(dpi) + ' '\n + '--export-type=' + fmt + ' '\n + '--export-filename=\"' + outfile + '\" '\n '\"' + svgfile + '\"')\n\n for line, svgfile in self.svgouts.iteritems():\n d = self.get_line_desc(line)\n outfile = self.get_output(d)\n if self.options.format == 'jpg':\n # TODO: output a jpg file\n self.options.format = 'png'\n outfile = outfile.replace('jpg', 'png')\n if self.options.format == 'svg':\n try:\n shutil.move(svgfile, outfile)\n except OSError:\n errormsg(_('Cannot create \"' + outfile + '\"'))\n else:\n cmd = get_export_cmd(svgfile,\n self.options.format,\n self.options.dpi, outfile)\n os.system(cmd)",
"def rsMakeComp(args):\n\n pathControlSelection = cmds.optionMenu('%s_optionMenu05'\n % windowID, query=True, value=True)\n if cmds.objExists('camera') is False:\n print '# Couldn\\'t find \\'camera\\' #'\n raise RuntimeError('Couldn\\'t find the camera. Make sure the main camera is called \\'camera\\''\n )\n\n si = autoConnect.SceneInfo()\n\n if si.isSceneSaved is False:\n print '# Scene has not been saved yet #'\n raise RuntimeError('_\\n%s' % 'Scene hasn\\'t been saved')\n\n DATA = {}\n\n BASE_PATH = si.renders\n START_FRAME = si.startFrame\n END_FRAME = si.endFrame\n DURATION = si.duration\n currentTime = si.currentTime\n currentWidth = si.currentWidth\n currentHeight = si.currentHeight\n FRAME_RATE = si.frameRate\n EXTENSION = 'exr'\n IMAGE_PATH = None\n\n sn = cmds.file(query=True, sn=True, shortName=True)\n if sn:\n # removes the versioning // Studio AKA specific setting.\n SCENE_NAME = (sn.split('.')[0])[:-4]\n else:\n SCENE_NAME = 'untitled_maya_scene'\n OUTPUT_OPTION = [w for w in renderOutput.SIZE_TEMPLATE\n if currentWidth == w['width'] and currentHeight\n == w['height']]\n # Check output templates\n if OUTPUT_OPTION == []:\n raise RuntimeError(\n 'The current output size is not one of the defined templates. This is unsupported.\\n\\\n To continue, select one of the templated output formats.'\n )\n TEMPLATE = None\n IMAGE_PATHS = []\n FOOTAGE_NAMES = []\n MAYA_CAMERA = None\n\n TEMPLATE = OUTPUT_OPTION[0]['suffix']\n\n if pathControlSelection == renderOutput.OUTPUT_TEMPLATES[0]:\n print '# Output path not yet set #'\n raise RuntimeError('Path template is not set. To continue, select one of the output path templates.')\n\n LAYER_NAME = renderSetup.instance().getVisibleRenderLayer().name()\n VERSION = cmds.optionMenu('%s_outputVersionMenu' % windowID,\n query=True, value=True)\n\n # Decode the exr template file\n decoded_exr_template_file = base64.b64decode(templates.EXR_TEMPLATES[TEMPLATE])\n PADDING = str(int(START_FRAME)).zfill(4)\n\n BASE_PATH = rsRenderOutput.pathStr(\n LAYER_NAME,\n long=True\n )\n IMAGE_PATH = '{path}_{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=PADDING,\n ext=EXTENSION\n )\n\n if 'layout' in LAYER_NAME:\n IMAGE_PATH = '{path}.{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=PADDING,\n ext='jpg'\n )\n IMAGE_PATH = os.path.normpath(IMAGE_PATH)\n\n\n def capture_layout():\n multiSample = cmds.getAttr(\n 'hardwareRenderingGlobals.multiSampleEnable'\n )\n ssao = cmds.getAttr(\n 'hardwareRenderingGlobals.ssaoEnable'\n )\n\n window = autoConnect.captureWindow(\n int(currentWidth) * 0.50, int(currentHeight) * 0.50 + 30\n )\n\n # Tying to force Maya to retain this setting...\n # Set image format to jpg\n cmds.setAttr('%s.imageFormat'\n % renderOutput.DEFAULTS_NODE, 8)\n\n # Make pers non-renderable\n cmds.setAttr('perspShape.renderable', 0)\n\n # Make camera renderable, if exists.\n cmds.setAttr('cameraShape.renderable', 1)\n image_path = IMAGE_PATH.replace('.{}.jpg'.format(PADDING), '')\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n cmds.playblast( # compression=compression,\n format='image',\n percent=int(100),\n viewer=False,\n startTime=int(START_FRAME),\n endTime=int(END_FRAME),\n showOrnaments=True,\n forceOverwrite=True,\n filename=image_path,\n widthHeight=[int(currentWidth),\n int(currentHeight)],\n rawFrameNumbers=True,\n framePadding=int(4),\n )\n\n cmds.setAttr(\n 'hardwareRenderingGlobals.multiSampleEnable', multiSample)\n cmds.setAttr('hardwareRenderingGlobals.ssaoEnable',\n ssao)\n window.close()\n\n def 
confirm_overwrite(aov):\n message = '{layer} - {aov}: Render images already exists at the current location.\\n'\n message += 'If you choose \\'Overwrite\\' they will be replaced with a blank placeholder sequence.\\n\\n'\n message += 'Otherwise click \\'Import Existing\\' to import the existing sequence (recommended).\\n'\n message += 'Image Path: {path}'\n\n message = message.format(\n layer=LAYER_NAME,\n aov=aov,\n path=IMAGE_PATH\n )\n\n return cmds.confirmDialog(\n title='Warning',\n message=message,\n button=['Import Existing', 'Overwrite'],\n defaultButton='Import Existing',\n cancelButton='Import Existing',\n dismissString='Import Existing',\n )\n\n def write_exrs(aov):\n for n in xrange(DURATION):\n IMAGE_PATH = '{path}_{padding}.{ext}'.format(\n path=BASE_PATH,\n padding=str(int(START_FRAME) + int(n)).zfill(4),\n ext=EXTENSION\n )\n\n if 'layout' in LAYER_NAME:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n else:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', aov))\n\n if not os.path.exists(os.path.dirname(image_path)):\n os.makedirs(os.path.dirname(image_path))\n\n with open(image_path, 'w') as exr_file:\n exr_file = open(image_path, 'w')\n exr_file.write(decoded_exr_template_file)\n\n\n # LOOP THROUGH AOVS\n AOVs = rsRenderOutput.get_active_Arnold_AOVs()\n\n if not AOVs:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n if os.path.isfile(image_path) and (confirm_overwrite('(no AOVs)') == 'Overwrite'):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(None)\n elif not os.path.isfile(image_path):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(None)\n\n if os.path.isfile(image_path):\n IMAGE_PATHS.append(str(image_path))\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov='beauty',\n version=VERSION\n )\n )\n\n for aov in AOVs:\n if 'layout' in LAYER_NAME:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', ''))\n else:\n image_path = os.path.normpath(IMAGE_PATH.replace('<AOV>', aov))\n\n if os.path.isfile(image_path) and (confirm_overwrite(aov) == 'Overwrite'):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(aov)\n elif not os.path.isfile(image_path):\n if 'layout' in LAYER_NAME:\n capture_layout()\n else:\n write_exrs(aov)\n\n\n if os.path.isfile(image_path):\n IMAGE_PATHS.append(str(image_path))\n if 'layout' in LAYER_NAME:\n if os.path.isfile(image_path):\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov='beauty',\n version=VERSION\n )\n )\n break\n if os.path.isfile(image_path):\n FOOTAGE_NAMES.append(\n '{layer}_{aov}_{version}'.format(\n layer=LAYER_NAME.replace('_rsLayer', ''),\n aov=aov,\n version=VERSION\n )\n )\n\n # House cleaning\n rsUtility.removeMissingSelections()\n\n # Export Camera from scene\n MAYA_CAMERA = autoConnect.exportCamera()\n if MAYA_CAMERA:\n pass\n else:\n raise RuntimeError('Couldn\\'t export maya camera.')\n\n # ############################################################\n # Time to call Ater Effects!\n\n if IMAGE_PATHS:\n pass\n else:\n raise RuntimeError('No image path could be found to export.')\n\n ac = autoConnect.AutoConnect()\n aePath = ac.AFTER_EFFECTS_PATH\n if aePath:\n pass\n else:\n raise RuntimeError('Couldn\\'t find After Effects.')\n\n tempfile.gettempdir()\n scriptPath = os.path.normpath(os.path.join(tempfile.gettempdir(),\n 'aeCommand.jsx'))\n\n # #############################################\n # 
Script file\n\n script = aeCommand.script\n AE_SCRIPT = script.replace(\n '<Name>', str(SCENE_NAME)\n ).replace(\n '<Width>', str(currentWidth)\n ).replace(\n '<Height>', str(currentHeight)\n ).replace(\n '<Pixel_Aspect>',str(1)\n ).replace(\n '<Duration>', str(float(DURATION) / float(FRAME_RATE))\n ).replace(\n '<Frame_Rate>', str(float(FRAME_RATE))\n ).replace(\n '<Image_Paths>', '{}'.format(IMAGE_PATHS)\n ).replace(\n '<Footage_Names>', str(FOOTAGE_NAMES)\n ).replace(\n '<Maya_Camera>', MAYA_CAMERA.replace('\\\\', '\\\\\\\\')\n )\n\n # #############################################\n\n with open(scriptPath, 'w') as AE_SCRIPT_FILE:\n AE_SCRIPT_FILE.write(str(AE_SCRIPT))\n AE_SCRIPT_FILE.close()\n\n cmd = '\"%s\" -r \"%s\"' % (ac.AFTER_EFFECTS_PATH, scriptPath)\n process = QProcess()\n process.startDetached(cmd)",
"def export_to_gsas():\n # Get workflow\n work_flow = my_data.get()\n\n output_file_name = '/tmp/acceptance_test.gda'\n\n # Clear the file if it exists.\n if os.path.exists(output_file_name):\n os.remove(output_file_name)\n\n status = work_flow.export_gsas_file(run_number=80231)\n assert status\n assert os.path.exists(output_file_name)",
"def export_to_file(self):\r\n return True",
"def save_file():\n filepath = asksaveasfilename(\n defaultextension=\"txt\",\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")],\n )\n if not filepath:\n return\n with open(filepath, \"w\") as output_file:\n text = txt_edit.get(\"1.0\", tk.END)\n output_file.write(text)\n window.title(f\"Switch port Consolidation - {filepath}\")",
"def exportAssetAssembly(name, rigTopNode, meshTopNode, path, postScript=None):\n if pm.ls(rigTopNode):\n rigTopNode = pm.PyNode(rigTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check your \"\n \"scene\".format(rigTopNode))\n return\n\n if pm.ls(meshTopNode):\n meshTopNode = pm.PyNode(meshTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check \"\n \"your scene\".format(meshTopNode))\n return\n # check the folder and script\n # if the target name exist abort and request another name\n\n deformer_jnts = rigTopNode.rigGroups[3].connections()[0].members()\n if not deformer_jnts:\n pm.displayError(\n \"{} is empty. The tool can't find any joint\".format(meshTopNode))\n\n # export connections and cut joint connections\n file_path = os.path.join(path, name + \".jmm\")\n dm_nodes = exportConnections(source=deformer_jnts,\n filePath=file_path,\n disc=True)\n\n # cut al possible remaining connection and adjust hierarchy\n # joint or visibility\n jnt_org = pm.PyNode(\"jnt_org\")\n pm.disconnectAttr(rigTopNode.jnt_vis, jnt_org.visibility)\n\n # restructure model\n model = pm.createNode(\"transform\",\n n=\"model\",\n p=None,\n ss=True)\n pm.addAttr(model, ln=\"rigGroups\", at='message', m=1)\n pm.parent(meshTopNode, jnt_org, model)\n\n # disconnect jnt set\n sets = rigTopNode.listConnections(type=\"objectSet\")\n\n deformersGrp = None\n for oSet in sets:\n if \"deformers_grp\" in oSet.name():\n deformersGrp = oSet\n\n if deformersGrp:\n for cnx in deformersGrp.message.listConnections(p=True):\n pm.disconnectAttr(deformersGrp.message, cnx)\n pm.connectAttr(deformersGrp.message, model.attr(\"rigGroups[0]\"))\n\n # disconnect bindPoses\n dg_poses = rigTopNode.message.listConnections(type=\"dagPose\", p=True)\n for dgp in dg_poses:\n if dgp.node().name().startswith(\"bindPose\"):\n pm.disconnectAttr(rigTopNode.message, dgp)\n\n # post script\n if postScript:\n try:\n exec(compile(open(postScript, \"rb\").read(), postScript, 'exec'))\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n pm.displayError(message)\n cont = pm.confirmBox(\"FAIL: Script Fail\",\n \"Do you want to export anyway?\" + \"\\n\\n\"\n + message + \"\\n\\n\" + traceback.format_exc(),\n \"Continue\", \"Cancel\")\n if not cont:\n pm.undo()\n return\n\n # export rig model\n pm.select(dm_nodes, r=True)\n pm.select(rigTopNode, add=True)\n file_path = os.path.join(path, name + \"_rig.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)\n\n # export mesh and joints\n pm.select(model, r=True)\n file_path = os.path.join(path, name + \"_model.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)",
"def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst",
"def on_export_button(self, event):\n wildcard = \"Filtered _iso_res_filt.csv file (*_iso_res_filt.csv)|*_iso_res_filt.csv|\"\\\n \"All files (*.*)|*.*|\"\n defFile = self.datafile[:-4]+'_filt.csv'\n dlg = wx.FileDialog(\n self, message=\"Save file as ...\", \n defaultDir=self.currentDirectory, \n defaultFile=defFile, wildcard=wildcard, style=wx.SAVE\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.recalcAll()\n self.redrawAll()\n self.dataFrame['priorFilter'] = self.dataFrame['allFPass']\n self.dataFrame.to_csv(path, index=False)\n summaryCSVPath = path.split('.')[0] + '_median_[' + ''.join(self.calcNum) + ']_[' + ''.join(self.calcDen) + '].csv'\n self.writeSummaryCSV(summaryCSVPath)\n \n dlg.Destroy()",
"def to_text_file(self, filename):\n translate.write_psf_text(self, filename)\n return",
"def export_tsp(nodes, scale, comment, pre_filename=None):\n filename = pre_filename\n if comment is None:\n comment = \"PUT PROBLEM DESCRIPTION HERE\"\n # check if the function was called with a filename\n if filename is None:\n filename = asksaveasfile(defaultextension=\".tsp\")\n # check if the user did select a file\n if filename:\n _file = open(filename.name, 'w')\n _file.write(\"NAME : \" + os.path.basename(filename.name) + \"\\n\")\n _file.write(\"COMMENT : \" + comment + \"\\n\")\n\n groups = construct_groups_string(nodes)\n if not groups == \"\":\n _file.write(\"COMMENT : CLUSTERS : \" + groups + \"\\n\")\n\n startnodes = construct_startnodes_string(nodes)\n if not startnodes == \"\":\n _file.write(\"COMMENT : STARTNODES : \" + startnodes + \"\\n\")\n\n _file.write(\"TYPE: TSP\" + \"\\n\")\n _file.write(\"DIMENSION: \" + str(len(nodes)) + \"\\n\")\n _file.write(\"EDGE_WEIGHT_TYPE : EUC_2D\" + \"\\n\")\n _file.write(\"NODE_COORD_SECTION\" + \"\\n\")\n\n for (index, node) in enumerate(nodes):\n _file.write(str(index + 1) + \" \" + str(node.x_coord * scale) +\n \" \" + str(node.y_coord * scale) + \"\\n\")\n _file.write(\"EOF\")\n _file.close()\n return os.path.basename(filename.name)",
"def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)"
]
| [
"0.5916673",
"0.57419854",
"0.554426",
"0.543195",
"0.53800714",
"0.5367973",
"0.5343087",
"0.5321925",
"0.5260684",
"0.5219404",
"0.516955",
"0.51652265",
"0.51367575",
"0.5104578",
"0.50920767",
"0.5079807",
"0.50521576",
"0.50394034",
"0.50246775",
"0.5019167",
"0.50150526",
"0.5013875",
"0.5010641",
"0.49991718",
"0.4983105",
"0.49640808",
"0.49442914",
"0.49394566",
"0.49352694",
"0.49325836"
]
| 0.60936 | 0 |
Read 3D volume. You can also apply threshold to the data | def read_volume(volume_file, threshold=-Inf):
volume = nload(str(volume_file))
data = volume.get_fdata()
if threshold is not None:
i = data >= threshold
else:
i = abs(data) > 0.001 # exclude small values
output = zeros(i.sum(), dtype=DTYPE_VOLUME)
output['pos'] = apply_affine(volume.affine, array(where(i)).T)
if threshold is not None:
output['value'] = 1
else:
output['value'] = data[i]
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)",
"def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r",
"def single_volume_inference(self, volume):\n self.model.eval()\n\n # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis\n slices = []\n\n # Write code that will create mask for each slice across the X (0th) dimension. After \n # that, put all slices into a 3D Numpy array. You can verify if your method is \n # correct by running it on one of the volumes in your training set and comparing \n # with the label in 3D Slicer.\n \n # normalize\n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n \n new_image = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n mask3d = np.zeros(new_image.shape)\n \n for slc_ix in range(new_image.shape[2]):\n tsr_test = torch.from_numpy(new_image[:,:,slc_ix].astype(np.single)).unsqueeze(0).unsqueeze(0)\n #image = torch.from_numpy(self.data[slc[0]][\"image\"][:,:,slc[1]]).unsqueeze(0)\n #tsr_test = torch.from_numpy(slc.astype(np.single)).unsqueeze(0).unsqueeze(0)\n pred = self.model(tsr_test.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n mask3d[:,:,slc_ix] = torch.argmax(pred, dim=0)\n\n return mask3d",
"def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)",
"def test_3d_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/data/test%03d.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback_3D(dic,data)",
"def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)",
"def test_3d_freq_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n lowmem_write_readback_3D(dic,data)",
"def test_3d_steam_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback(dic,data)",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr",
"def test_3d_steam_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n write_readback(dic,data)",
"def read_ch3_pressure(self):\n sensor = self.ch3_index + 1\n return self.vgc.read_sensor(sensor)",
"def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)",
"def test_volume():\n structure = Material(input)\n assert (structure.volume == 90.725624999999965)",
"def get_box_volume(solvent_file):\n box_volume = None\n file = solvent_file\n with open(file,\"rt\") as fin:\n for line in fin:\n if line[0:6] == \"CRYST1\":\n x_length = float(line[9:14])\n y_length = float(line[18:23])\n z_length = float(line[27:33])\n box_volume = x_length * y_length * z_length\n return(box_volume)\n return(box_volume)",
"def load_nifty_volume_as_4d_array(filename):\n img_obj = sitk.ReadImage(filename)\n data_array = sitk.GetArrayFromImage(img_obj)\n origin = img_obj.GetOrigin()\n spacing = img_obj.GetSpacing()\n direction = img_obj.GetDirection()\n shape = data_array.shape\n if(len(shape) == 4):\n assert(shape[3] == 1) \n elif(len(shape) == 3):\n data_array = np.expand_dims(data_array, axis = 0)\n else:\n raise ValueError(\"unsupported image dim: {0:}\".format(len(shape)))\n output = {}\n output['data_array'] = data_array\n output['origin'] = origin\n output['spacing'] = (spacing[2], spacing[1], spacing[0])\n output['direction'] = direction\n return output",
"def volume(self):\n return self.volume_array",
"def test_3d_steam_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n write_readback(dic,data)",
"def get_volume(cls) -> float:\n raise NotImplementedError",
"def getVolume(volume, clipPlaneObject, obj_center):\n \n if not isinstance(clipPlaneObject, ClipPlane.ClipPlane):\n raise TypeError(\"the input variable must be a instance from class ClipPlane.ClipPlane\")\n \n if type(volume) != np.ndarray:\n raise TypeError(\"the input volume must be numpy array!\")\n \n c1 = obj_center[0]\n c2 = obj_center[1]\n c3 = obj_center[2]\n \n x_low = clipPlaneObject.x1 + c1\n x_high = clipPlaneObject.x2 + c1\n y_low = clipPlaneObject.y1 + c2\n y_high = clipPlaneObject.y2 + c2\n z_low = clipPlaneObject.z1 + c3\n z_high = clipPlaneObject.z2 + c3\n \n xl, yl, zl = volume.shape\n \n # make all values valid\n if x_low < 0: x_low =0 \n if x_high > xl: x_high = xl\n if y_low < 0: y_low =0 \n if y_high > xl: y_high = xl\n if z_low < 0: z_low =0 \n if z_high > xl: z_high = xl\n \n volume_index = np.zeros_like(volume)\n \n print x_low, x_high, y_low, y_high, z_low, z_high\n volume_index[x_low:x_high, y_low:y_high, z_low:z_high] = True\n \n print volume.mean()\n print sum(volume_index)\n \n outvolume = volume * volume_index\n \n print (outvolume).mean()\n \n return outvolume",
"def GetDataVolume(vDataSet,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"GetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n if dtype == np.uint8:\r\n s = vDataSet.GetDataVolumeAs1DArrayBytes(aIndexC,aIndexT)\r\n arr = np.frombuffer(s,dtype).reshape((nz,ny,nx)).copy()\r\n else:\r\n #We define an empty array of the final size\r\n arr = np.empty(nz*ny*nx,dtype)\r\n\r\n if dtype == np.uint16:\r\n GetData = vDataSet.GetDataSubVolumeAs1DArrayShorts\r\n elif dtype == np.float32:\r\n GetData = vDataSet.GetDataSubVolumeAs1DArrayFloats\r\n\r\n #Filling-up the array\r\n for z in range(nz):\r\n arr[z*ny*nx:(z+1)*ny*nx] = GetData(0,0,z,aIndexC,aIndexT,nx,ny,1)\r\n\r\n arr = arr.reshape(nz,ny,nx)\r\n\r\n return np.ascontiguousarray(arr)",
"def get_cubic_volume(kv):\n v = None\n try:\n x = kv[gcode_key.MAXX] - kv[gcode_key.MINX]\n y = kv[gcode_key.MAXY] - kv[gcode_key.MINY]\n z = kv[gcode_key.MAXZ] - kv[gcode_key.MINZ]\n v = x * y * z\n except KeyError:\n pass\n return v",
"def get3D_rod():\n\n volume = torch.zeros(1,1,55,54,53)\n length = 15\n st = [27,26,25]\n\n volume[:,:,st[0]:st[0]+length,st[1],st[2]] = 0.5\n volume[:,:,st[0]+length:st[0]+length+2,st[1],st[2]] = 0.2\n\n volume[:,:,st[0],st[1]:st[1]+length,st[2]] = 0.5\n volume[:,:,st[0],st[1]+length:st[1]+length+2,st[2]] = 1.\n \n volume[:,:,st[0],st[1],st[2]:st[2]+length] = 0.5\n volume[:,:,st[0],st[1],st[2]+length:st[2]+length+2] = 2.0\n \n volume[:,:,st[0],st[1]:st[1]+length,st[2]:st[2]+length] = 0.2\n volume[:,:,st[0],st[1]+length:st[1]+length+1,st[2]+length:st[2]+length+1] = 1.5\n\n return volume",
"def single_volume_inference_unpadded(self, volume):\n \n # normalize the data volume \n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n # reshape the image volume to the same patch size used for training\n img_reshaped = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n # create a new 3d mask to store predicted results\n mask3d = np.zeros(img_reshaped.shape)\n # iterate over the image array and predict the all the slices\n for slc_idx in range(img_reshaped.shape[2]):\n # compute for each slice\n slc = torch.from_numpy(img_reshaped[:,:,slc_idx].astype(np.single)).unsqueeze(0).unsqueeze(0)\n # make prediction\n pred = self.model(slc.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n # store predicted data\n mask3d[:,:,slc_idx] = torch.argmax(pred, dim=0)\n # return the predicted volume\n return mask3d",
"def treat_volume(volume):\n labels = measure.label(volume.dataobj, background=0, connectivity=2)\n new_volume = np.asarray(volume.dataobj)\n new_volume[labels > 1] = 0\n new_volume = nib.Nifti1Image(new_volume, volume.affine)\n return new_volume",
"def ellipsoid_volume(radius1: number, radius2: number, radius3: number) -> number:\n volume = 4/3*(pi*radius1*radius2*radius3)\n return volume",
"def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data",
"def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data",
"def estimate_volume(self):\n volume = 0.\n zvals = np.unique([c.image_z_position for c in self.contours])\n\n # We pad a zval on the bottom that is the same distance from the\n # first zval to the second zval but below the first point. We do \n # the same thing for the top zval.\n if len(self.contours) != 1:\n zlow = zvals[ 0] - (zvals[1]-zvals[0])\n zhigh = zvals[-1] + (zvals[-1]-zvals[-2])\n zvals = np.r_[zlow, zvals, zhigh]\n else:\n zvals = None\n\n for i,contour in enumerate(self.contours):\n contour_array = contour.to_matrix() * self.scan.pixel_spacing\n x = contour_array[:,0]\n y = contour_array[:,1]\n # \"Shoelace\" formula for area.\n area = 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n \n if zvals is not None:\n j = np.argmin(np.abs(contour.image_z_position-zvals))\n spacing_z = 0.5*(zvals[j+1]-zvals[j-1])\n else:\n spacing_z = self.scan.slice_thickness\n\n volume += (1. if contour.inclusion else -1.) * area * spacing_z\n return volume",
"def test_3d_lowmem():\n dic, data = ng.bruker.read_lowmem(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n lowmem_write_readback(dic, data)"
]
| [
"0.65125376",
"0.65035737",
"0.6461333",
"0.6362523",
"0.6296995",
"0.61123705",
"0.6080686",
"0.60625637",
"0.6014546",
"0.59458286",
"0.5902094",
"0.5897134",
"0.5870413",
"0.5815913",
"0.57933134",
"0.57682717",
"0.5767813",
"0.5749741",
"0.5746253",
"0.57217675",
"0.5707402",
"0.57009226",
"0.57007825",
"0.56761694",
"0.56695944",
"0.5659958",
"0.5653584",
"0.5653584",
"0.5632674",
"0.5630444"
]
| 0.681766 | 0 |
Executes openio CLI command. | def openio(cls, cmd, coverage="--coverage ", **kwargs):
return execute("openio " + coverage + cmd, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openio_admin(cls, cmd, coverage=\"--coverage \", **kwargs):\n return execute(\"openio-admin \" + coverage + cmd, **kwargs)",
"def execute(cmd) :\n return os.system( cmd )",
"def cli() -> None:",
"def cli() -> None:",
"def do_command(self, args):\n pass",
"def execute_cli(self, cmd, **kwargs):\n cli = CLI()\n cli.set_connection(self.connection)\n response = cli.execute(cmd, **kwargs)\n return response.http_response.json()",
"def _oc_command(self, args):\n oc_command_exists()\n return [\"oc\"] + args",
"def os_system(command):\n os.system(command)",
"def exe(self, inp):\n try:\n spl = shlex.split(inp)\n except:\n self.err_print('Mismatched quotations.')\n self.command_event.set()\n return\n\n if not spl:\n self.err_print(\"\")\n elif spl[0] in self.commands:\n self.err_print(\"\")\n self.commands[spl[0]](spl[1:])\n else:\n self.err_print('Invalid command: ' + spl[0])\n\n self.command_event.set()",
"def openio_batch(cls, commands, coverage=\"--coverage\", **kwargs):\n script = \"\\n\".join(commands)\n try:\n return execute(\"openio \" + coverage, stdin=script, **kwargs)\n except CommandFailed:\n print(\"Stdin was:\\n\\n%s\" % (script,))\n raise",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():"
]
| [
"0.6797947",
"0.6201791",
"0.6181111",
"0.6181111",
"0.61580837",
"0.6095095",
"0.60691255",
"0.60500485",
"0.58856475",
"0.58728683",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526",
"0.58718526"
]
| 0.7085731 | 0 |
Execute several commands in the same openio CLI process. | def openio_batch(cls, commands, coverage="--coverage", **kwargs):
script = "\n".join(commands)
try:
return execute("openio " + coverage, stdin=script, **kwargs)
except CommandFailed:
print("Stdin was:\n\n%s" % (script,))
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openio_admin_batch(cls, commands, coverage=\"--coverage\", **kwargs):\n return execute(\"openio-admin \" + coverage, stdin=\"\\n\".join(commands), **kwargs)",
"def run_commands(commands, pii=False):\n for command in commands:\n run(command, pii=pii)",
"def run(self, commands: list[str]):\n ...",
"def execute_commands(commands):\n for new_process in commands:\n command = new_process.split()\n with subprocess.Popen(command) as proc:\n proc.wait()",
"def run(self):\n for command in CUSTOM_COMMANDS:\n self.run_custom_command(command)",
"def node_execute_multiple(self, ipaddr, username, password, commands):\n for cmd in commands:\n rc, output, error = self.node_execute_command(ipaddr, username, password, cmd)\n if rc is False:\n print(\"error running: [%s] %s\" % (ipaddr, cmd))",
"def process_commands(self, commands: List[str]):",
"def run_commands(self, commands, timeout=None, stdout=True):\n for command in commands:\n self.run_command(command, timeout=timeout, stdout=stdout)",
"def initialize_commands(self) -> None:\n\n @self.command(name=\"snr\")\n @logger(\"all\")\n async def snr(ctx, *args):\n await ctx.message.channel.send(str(indie_seq.Seq([int(k) for k in args]).f()))\n\n @self.command(name=\"oeis\")\n @logger(\"all\")\n async def oeis(ctx, *args):\n global oeis_in_progress\n if not oeis_in_progress:\n oeis_in_progress = True\n if len(args) > 0:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(args[0]))\n else:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(str(random.randint(1, 341962))))\n oeis_in_progress = False\n else:\n await ctx.message.add_reaction(\"❌\")\n\n @self.command(name=\"collatz\")\n @logger(\"all\")\n async def collatz(ctx, *args):\n num = int(args[0])\n inity = \"\" if len(args) < 2 else args[1]\n\n collatz_results = indie_collatz.collatz_info(num)\n if len(inity) == 1:\n if inity == \"e\":\n await ctx.message.channel.send(f\"Evenity trajectory of {num}: {collatz_results.evenity_trajectory}\")\n elif inity == \"o\":\n await ctx.message.channel.send(f\"Oddinity trajectory of {num}: {collatz_results.oddinity_trajectory}\")\n else:\n await ctx.message.channel.send(f\"Collatz trajectory of {num}: {collatz_results.collatz_trajectory}\")\n\n @self.group(name=\"pig\")\n @logger(\"pig-math\")\n async def pig(ctx, *args):\n if ctx.invoked_subcommand is None:\n await ctx.message.add_reaction(\"❌\")\n\n def get_user_id_from_mention(user_id):\n user_id = user_id.replace(\"<\", \"\")\n user_id = user_id.replace(\">\", \"\")\n user_id = user_id.replace(\"@\", \"\")\n user_id = user_id.replace(\"!\", \"\")\n return user_id\n\n # Pig Math commands\n\n @pig.command(name=\"challenge\")\n @logger(\"pig-math\")\n async def pig_challenge(ctx, *args):\n challengee = get_user_id_from_mention(args[1])\n challengee = (await self.fetch_user(challengee)).name\n if len(args) > 2:\n point_target = int(args[2])\n else:\n point_target = 100\n pig_challenge = indie_pig.PigChallenge.create_challenge(ctx.message.author.name, challengee, point_target)\n await ctx.message.channel.send(pig_challenge.status)\n\n @pig.command(name=\"accept\")\n @logger(\"pig-math\")\n async def pig_accept(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.accept_challenge(ctx.message.author.name))\n\n @pig.command(name=\"reject\")\n @logger(\"pig-math\")\n async def pig_reject(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.reject_challenge(ctx.message.author.name))\n\n @pig.command(name=\"roll\")\n @logger(\"pig-math\")\n async def pig_roll(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"roll\"))\n\n @pig.command(name=\"bank\")\n @logger(\"pig-math\")\n async def pig_bank(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"bank\"))\n\n @pig.command(name=\"score\")\n @logger(\"pig-math\")\n async def pig_score(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"score\"))\n\n @pig.command(name=\"quit\")\n @logger(\"pig-math\")\n async def pig_quit(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"quit\"))\n\n @self.command(name=\"save\")\n @logger(\"modonly\")\n async def save(ctx, *args):\n self.save_data_files()\n await ctx.message.channel.send(\"Saved.\")\n\n @self.command(name=\"balance\")\n @logger(\"all\")\n async def balance(ctx, *args):\n bals = self.data[\"balances.json\"]\n user = ctx.message.author.id\n bal = 0\n if user in 
bals:\n bal = bals[user]\n else:\n bals[user] = 0 \n await ctx.message.channel.send(ctx.message.author.name+\", your balance is \"+str(bal)+\".\")\n\n @self.command(name=\"credit\")\n @logger(\"modonly\")\n async def credit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit\n\n @self.command(name=\"debit\")\n @logger(\"modonly\")\n async def debit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n debit = 0\n for arg in args:\n try:\n debit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] -= debit\n else:\n bals[user.id] = -debit\n\n @self.command(name=\"register\")\n @logger(\"all\")\n async def register(ctx, *args):\n \"\"\"\n This command will trigger a check if the user is registered,\n if not, the bot will ask them to review the terms and conditions and accept,\n if they accept, the bot will consider them registered\n \"\"\"\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. :blue_heart:\")\n \n @self.command(name=\"accept\")\n @logger(\"all\")\n async def accept(ctx, *args):\n \"\"\"\n This command will trigger a check if the user has asked to be registered.\n If they have, then calling this triggers adding them to registered users.\n If they have not, they will be asked to type .register first.\n \"\"\"\n user = ctx.message.author\n user_mention = \"<@\"+str(user.id)+\">\"\n\n if user in self.data[\"users_asked_to_be_registered.json\"]:\n self.data[\"users.json\"].append(user)\n self.data[\"users_asked_to_be_registered.json\"].remove(user)\n await ctx.message.channel.send(user_mention+\", you have been successfully registered. :blue_heart:\")\n else:\n await ctx.message.channel.send(user_mention+\", have not commanded .register yet. \"\n \"Please do so first. :blue_heart:\")",
"def execute(commands, output = None, encoding = None, inactivityDuration = None, thread=False):\n\tprocess = CommandLauncher()\n\treturn process.executeMany(commands, output, encoding, inactivityDuration, thread)",
"def commands():",
"def do_command(self, args):\n pass",
"def run_commands(self, commands):\n if isinstance(commands, str):\n commands = commands.split('\\n')\n for item in commands:\n if item.strip().startswith(\"#\"):\n continue\n should_exit, result = self.cmd_with_result(item)\n if not result:\n break",
"def execute_commands(self, commands):\n for cmd in commands:\n self.action_list[cmd](commands[cmd])\n if cmd == 'r':\n break",
"def run_commands(self):\n processes = []\n\n i = 0\n ## get list of commands\n commands = self.get_commands()\n cnum = multiprocessing.cpu_count()\n\n while len(commands)>0:\n while len(processes)<cnum-1:\n c = commands.pop()\n i+=1\n print \"command #\",i, c\n ## run commands\n processes.append((i,subprocess.Popen(c, shell=True)))\n\n for j,p in processes:\n if p.poll() is not None:\n print j, \" status: \", p.poll()\n processes.remove((j,p))\n break\n else:\n time.sleep(10)\n return",
"def call_sys(cmds):\n for c in cmds:\n logger.info(c)\n try:\n os.system(c)\n except:\n logger.error(c)",
"def _run_commands(self, command_list):\n for cmd in command_list:\n print(cmd)\n if not self.dry_run:\n run(cmd)",
"def pipe_open(commands: list):\n process = Popen(commands, stdout=PIPE, stderr=PIPE)\n output, error = process.communicate()\n return output, error",
"def execute(self):\n self.process = subprocess.Popen(self.command_text_list)\n self.process.wait()",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def run_commands(\n commands: Iterable[str] = SimpleFrozenList(),\n silent: bool = False,\n dry: bool = False,\n capture: bool = False,\n) -> None:\n for c in commands:\n command = split_command(c)\n # Not sure if this is needed or a good idea. Motivation: users may often\n # use commands in their config that reference \"python\" and we want to\n # make sure that it's always executing the same Python that spaCy is\n # executed with and the pip in the same env, not some other Python/pip.\n # Also ensures cross-compatibility if user 1 writes \"python3\" (because\n # that's how it's set up on their system), and user 2 without the\n # shortcut tries to re-run the command.\n if len(command) and command[0] in (\"python\", \"python3\"):\n command[0] = sys.executable\n elif len(command) and command[0] in (\"pip\", \"pip3\"):\n command = [sys.executable, \"-m\", \"pip\", *command[1:]]\n if not silent:\n print(f\"Running command: {join_command(command)}\")\n if not dry:\n run_command(command, capture=capture)",
"def main():\n parser = argparse.ArgumentParser(description='REA Robot')\n parser.add_argument('--c', metavar='FILE', type=str, required=False, help='File with commands to execute. One command per line')\n args = parser.parse_args()\n\n # Get list of commands to execute\n commands = load_command_list(args.c)\n if len(commands) == 0:\n commands = read_commands_from_console()\n\n logger.debug('List of commands to execute: {}'.format(commands))\n\n # Run the Robot\n robot = Robot()\n cmd_parser = CommandsParser(commands)\n while True:\n cmd_and_args = cmd_parser.get_next_command()\n if cmd_and_args:\n cmd_and_args[0].run(robot, cmd_and_args[1])\n else:\n break",
"def run_commands(ip_address, user, password, commandList, platform, buffer=5000):\n print \"Configuring \" + ip_address\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n remote_conn_pre.connect(ip_address, username=user, password=password)\n remote_conn = remote_conn_pre.invoke_shell()\n if platform == \"cisco\":\n remote_conn.send(\"enable\\n\")\n time.sleep(1)\n remote_conn.send(password+'\\n')\n time.sleep(1)\n commands = commandList.split('\\n')\n for com in commands:\n remote_conn.send(com+'\\n')\n time.sleep(1)\n output = remote_conn.recv(buffer)\n #print output",
"def call_commands_serially(commands,\r\n status_update_callback,\r\n logger,\r\n close_logger_on_success=True):\r\n logger.write(\"Executing commands.\\n\\n\")\r\n for c in commands:\r\n for e in c:\r\n status_update_callback('%s\\n%s' % e)\r\n logger.write('# %s command \\n%s\\n\\n' % e)\r\n stdout, stderr, return_value = qiime_system_call(e[1])\r\n if return_value != 0:\r\n msg = \"\\n\\n*** ERROR RAISED DURING STEP: %s\\n\" % e[0] +\\\r\n \"Command run was:\\n %s\\n\" % e[1] +\\\r\n \"Command returned exit status: %d\\n\" % return_value +\\\r\n \"Stdout:\\n%s\\nStderr\\n%s\\n\" % (stdout, stderr)\r\n logger.write(msg)\r\n logger.close()\r\n raise WorkflowError(msg)\r\n # in the no error case, we write commands' output to the log\r\n # and also echo to this proc's stdout/stderr\r\n else:\r\n # write stdout and stderr to log file\r\n logger.write(\"Stdout:\\n%s\\nStderr:\\n%s\\n\" % (stdout, stderr))\r\n # write stdout to stdout\r\n if stdout:\r\n print stdout\r\n # write stderr to stderr\r\n if stderr:\r\n sys.stderr.write(stderr)\r\n if close_logger_on_success:\r\n logger.close()",
"def _execute_impl(self, commands):\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((self.host, self.port))\n for c in commands:\n conn.sendall(c)\n conn.recv(4096)\n conn.close()",
"async def spawn(*command: Strings, **resources: int) -> None:\n current = Invocation.current\n\n def _run_exec(parts: List[str]) -> Awaitable:\n return asyncio.create_subprocess_exec(*parts, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n\n await current.done(current.run_action(\"spawn\", _run_exec, *command, **resources))",
"def parse_commands(self, commands):\n\n for command_str in commands:\n command_parts = command_str.split(' ')\n\n # Check if command string has at least 2 parts: '--cmd' and 'command_type'\n if len(command_parts) <= 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n # Extract command and parameters\n command_type = command_parts[1].lower()\n command_parameters = command_parts[2:len(command_parts)]\n\n # Form a command to be added to the command queue\n command = {}\n if command_type == 'load':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n folder_path = command_parameters[0].replace('\"', '').strip()\n\n command['method'] = self.app_instance.image_source.load_images\n command['parameters'] = {\n 'folder_path': folder_path\n }\n\n elif command_type == 'align':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n secondary_image_index = int(command_parameters[0])\n\n command['method'] = self.app_instance.align_nth_secondary_image\n command['parameters'] = {\n 'secondary_image_index': secondary_image_index\n }\n\n elif command_type == 'blend':\n # Check number of parameters\n if len(command_parameters) != 5:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n secondary_image_index = int(command_parameters[0])\n x = int(command_parameters[1])\n y = int(command_parameters[2])\n width = int(command_parameters[3])\n height = int(command_parameters[4])\n\n command['method'] = self.app_instance.blend_nth_secondary_image\n command['parameters'] = {\n 'secondary_image_index': secondary_image_index,\n 'x': x,\n 'y': y,\n 'width': width,\n 'height': height\n }\n\n elif command_type == 'save':\n # Check number of parameters\n if len(command_parameters) != 1:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n filename = command_parameters[0].replace('\"', '').strip()\n\n command['method'] = self.app_instance.save_result\n command['parameters'] = {\n 'filename': filename\n }\n\n else:\n print \"[ERROR] CommandLineExecutor::parse_commands() - Command not properly formatted: (\" + command_str + \")\"\n continue\n\n print \"[INFO] Queuing command: \" + command_str\n\n self.command_queue.append(command)"
]
| [
"0.7035743",
"0.67764515",
"0.67400265",
"0.66215736",
"0.64620274",
"0.64438605",
"0.6369911",
"0.63289934",
"0.6166716",
"0.6156091",
"0.61401606",
"0.61254555",
"0.60767615",
"0.60650593",
"0.6031006",
"0.60119826",
"0.5984301",
"0.59842825",
"0.5961239",
"0.5931458",
"0.5931458",
"0.5931458",
"0.5931458",
"0.5881909",
"0.5807555",
"0.5769642",
"0.57514447",
"0.5736208",
"0.5727419",
"0.5724862"
]
| 0.6805472 | 1 |
Executes openioadmin CLI command. | def openio_admin(cls, cmd, coverage="--coverage ", **kwargs):
return execute("openio-admin " + coverage + cmd, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openio_admin_batch(cls, commands, coverage=\"--coverage\", **kwargs):\n return execute(\"openio-admin \" + coverage, stdin=\"\\n\".join(commands), **kwargs)",
"async def admin(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")",
"def _oc_command(self, args):\n oc_command_exists()\n return [\"oc\"] + args",
"async def _admin(self, ctx: commands.Context):\n await ctx.send_help()",
"def handle_admincommands(bot, ievent):\n cmnds = getcmndtable()\n if not ievent.rest: ievent.reply(\"commands: \", cmnds)\n else:\n try: ievent.reply(\"%s command is found in %s \" % (ievent.rest, cmnds[ievent.rest]))\n except KeyError: ievent.reply(\"no such commands available\")",
"def run_as_admin(cmd):\n shell.ShellExecuteEx(lpVerb='runas', lpFile='cmd.exe', lpParameters='/c ' + cmd)",
"def manage(command, noinput=True):\n noinput = '--noinput' if noinput else ''\n run('envdir {bundle_root}/envdir {bundle_root}/env/bin/django-admin.py '\n '{command} {noinput}'.format(bundle_root=env.bundle_root,\n command=command, noinput=noinput))",
"def do_command(self, args):\n pass",
"def handle_admin(self, command):\n if type(command) != list:\n command = command.split()\n #shutdown-command\n if command[0] == \"shutdown\":\n print(\"Shutting down server\")\n self.server.shutdown()\n #register-user-commandsplit\n if command[0] == \"register\":\n # Save new user in the accountdada-save-file on the harddisk of the server\n account_file = open(\"accounts.sav\",\"a\")\n account_file.write(command[1]+\"|\"+command[2]+\"|user|\\n\")\n account_file.close()\n # Add new user to live list of accountdata.\n self.accounter.add_user(command[1], command[2], \"user\")",
"async def admin(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole admin\")",
"def execute_command(self):\n return ''",
"def execute_frontend(self, cmd, verbose=True):\n return self.arangosh.run_command(cmd, verbose)",
"async def admin(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **admin**\\n\\n **eboard admin auto** - updates the '\n 'new seats given current election data.\\n\\n**eboard admin set <position> <User#0000>** - assigns a '\n 'position to target user.\\n\\n**eboard admin remove <position> <User#0000>** - remove a target user '\n 'from their position.\\n\\n**eboard admin list** - lists the positions in the SQLite table.')",
"def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.add(args)",
"def cli() -> None:\r\n config_argparse = _configfile_parser()\r\n config_args, _ = config_argparse.parse_known_args()\r\n\r\n defaults = {}\r\n\r\n if config_args.config: \r\n defaults = _load_config(config_args)\r\n\r\n parser = _cli(config_argparse, defaults)\r\n _add_standard_args(parser) \r\n \r\n subparser = parser.add_subparsers()\r\n _add_create_command(subparser)\r\n _add_update_command(subparser) \r\n\r\n args = parser.parse_args()\r\n command = args.cmd\r\n command.execute(args)",
"def main(ctx: typer.Context):\n LOG.debug(F\"COVIDAP: executing command: {ctx.invoked_subcommand}\")",
"def execute(self):\n\n options, args = self.parser.parse_args(self.argv)\n\n try:\n subcommand_name = self.argv[1]\n except IndexError:\n subcommand_name = 'help'\n\n if subcommand_name == 'help':\n if len(args) <= 2:\n self.print_help()\n else:\n self.fetch_subcommand(self.argv[2]).print_help()\n elif subcommand_name == 'version':\n self.print_version()\n else:\n self.fetch_subcommand(subcommand_name).execute()",
"def manage(command, noinput=True):\n noinput = '--noinput' if noinput else ''\n run('%s/env/bin/django-admin.py %s %s --settings=settings' % (\n env.bundle_root, command, noinput,\n ))",
"def execute_cli(self, cmd, **kwargs):\n cli = CLI()\n cli.set_connection(self.connection)\n response = cli.execute(cmd, **kwargs)\n return response.http_response.json()",
"def command():\n pass",
"def cli() -> None:",
"def cli() -> None:",
"def openio(cls, cmd, coverage=\"--coverage \", **kwargs):\n return execute(\"openio \" + coverage + cmd, **kwargs)",
"def main(command: Optional[str] = None) -> int:\n package_registrar.import_and_register_cli_extension_packages()\n\n if VERSION_FLAG in sys.argv or (command and VERSION_FLAG in command):\n logger.info(f\"Gazoo Device Manager {gazoo_device.version}\")\n package_versions = extensions.get_registered_package_info()\n logger.info(f\"Registered extension packages: {package_versions}\")\n return 0\n\n return execute_command(command)",
"def cli():\n return",
"def commandInterface():\n\tusername = session[\"username\"]\n\tuserInput = request.form[\"commands\"]\n\toutput = cli.cli(userInput, username)\n\treturn render_template(\"index.html\", output=output)",
"def execute_from_command_line(argv=None):\n utility = ManagementUtility(argv)\n utility.execute()",
"def cli():",
"def cli():",
"def cli():"
]
| [
"0.6739558",
"0.6572091",
"0.62097144",
"0.6009581",
"0.5819513",
"0.5667608",
"0.56569225",
"0.5649356",
"0.5629662",
"0.5613385",
"0.5595961",
"0.55712277",
"0.55656326",
"0.55459076",
"0.5506487",
"0.5435759",
"0.53998935",
"0.53734577",
"0.5349197",
"0.5336373",
"0.5312184",
"0.5312184",
"0.5297424",
"0.52953446",
"0.527984",
"0.52661484",
"0.5244121",
"0.52008283",
"0.52008283",
"0.52008283"
]
| 0.7582081 | 0 |
Execute several commands in the same openioadmin CLI process. | def openio_admin_batch(cls, commands, coverage="--coverage", **kwargs):
return execute("openio-admin " + coverage, stdin="\n".join(commands), **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_execute_multiple(self, ipaddr, username, password, commands):\n for cmd in commands:\n rc, output, error = self.node_execute_command(ipaddr, username, password, cmd)\n if rc is False:\n print(\"error running: [%s] %s\" % (ipaddr, cmd))",
"def run(self, commands: list[str]):\n ...",
"def openio_admin(cls, cmd, coverage=\"--coverage \", **kwargs):\n return execute(\"openio-admin \" + coverage + cmd, **kwargs)",
"def run(self):\n for command in CUSTOM_COMMANDS:\n self.run_custom_command(command)",
"def run_commands(commands, pii=False):\n for command in commands:\n run(command, pii=pii)",
"def process_commands(self, commands: List[str]):",
"def commands():",
"def handle_admincommands(bot, ievent):\n cmnds = getcmndtable()\n if not ievent.rest: ievent.reply(\"commands: \", cmnds)\n else:\n try: ievent.reply(\"%s command is found in %s \" % (ievent.rest, cmnds[ievent.rest]))\n except KeyError: ievent.reply(\"no such commands available\")",
"def do_command(self, args):\n pass",
"def execute_commands(commands):\n for new_process in commands:\n command = new_process.split()\n with subprocess.Popen(command) as proc:\n proc.wait()",
"def run_commands(self, commands, timeout=None, stdout=True):\n for command in commands:\n self.run_command(command, timeout=timeout, stdout=stdout)",
"def openio_batch(cls, commands, coverage=\"--coverage\", **kwargs):\n script = \"\\n\".join(commands)\n try:\n return execute(\"openio \" + coverage, stdin=script, **kwargs)\n except CommandFailed:\n print(\"Stdin was:\\n\\n%s\" % (script,))\n raise",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def main_command_loop(self) -> None:\n\n self.commands.pop(0)\n for command in self.commands:\n if \"--static\" == command:\n self.update_static_files()\n elif \"--temp\" == command:\n self.update_templates()\n elif \"--app\" == command:\n self.update_app()\n elif \"--settings\" == command:\n self.update_settings()\n elif \"--mysql-files\" == command:\n self.update_mysql_files()\n elif \"--help\" == command or \"-h\" == command:\n self.write_help_message()",
"def execute_commands(self, commands):\n for cmd in commands:\n self.action_list[cmd](commands[cmd])\n if cmd == 'r':\n break",
"def _run_commands(self, command_list):\n for cmd in command_list:\n print(cmd)\n if not self.dry_run:\n run(cmd)",
"def initialize_commands(self) -> None:\n\n @self.command(name=\"snr\")\n @logger(\"all\")\n async def snr(ctx, *args):\n await ctx.message.channel.send(str(indie_seq.Seq([int(k) for k in args]).f()))\n\n @self.command(name=\"oeis\")\n @logger(\"all\")\n async def oeis(ctx, *args):\n global oeis_in_progress\n if not oeis_in_progress:\n oeis_in_progress = True\n if len(args) > 0:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(args[0]))\n else:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(str(random.randint(1, 341962))))\n oeis_in_progress = False\n else:\n await ctx.message.add_reaction(\"❌\")\n\n @self.command(name=\"collatz\")\n @logger(\"all\")\n async def collatz(ctx, *args):\n num = int(args[0])\n inity = \"\" if len(args) < 2 else args[1]\n\n collatz_results = indie_collatz.collatz_info(num)\n if len(inity) == 1:\n if inity == \"e\":\n await ctx.message.channel.send(f\"Evenity trajectory of {num}: {collatz_results.evenity_trajectory}\")\n elif inity == \"o\":\n await ctx.message.channel.send(f\"Oddinity trajectory of {num}: {collatz_results.oddinity_trajectory}\")\n else:\n await ctx.message.channel.send(f\"Collatz trajectory of {num}: {collatz_results.collatz_trajectory}\")\n\n @self.group(name=\"pig\")\n @logger(\"pig-math\")\n async def pig(ctx, *args):\n if ctx.invoked_subcommand is None:\n await ctx.message.add_reaction(\"❌\")\n\n def get_user_id_from_mention(user_id):\n user_id = user_id.replace(\"<\", \"\")\n user_id = user_id.replace(\">\", \"\")\n user_id = user_id.replace(\"@\", \"\")\n user_id = user_id.replace(\"!\", \"\")\n return user_id\n\n # Pig Math commands\n\n @pig.command(name=\"challenge\")\n @logger(\"pig-math\")\n async def pig_challenge(ctx, *args):\n challengee = get_user_id_from_mention(args[1])\n challengee = (await self.fetch_user(challengee)).name\n if len(args) > 2:\n point_target = int(args[2])\n else:\n point_target = 100\n pig_challenge = indie_pig.PigChallenge.create_challenge(ctx.message.author.name, challengee, point_target)\n await ctx.message.channel.send(pig_challenge.status)\n\n @pig.command(name=\"accept\")\n @logger(\"pig-math\")\n async def pig_accept(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.accept_challenge(ctx.message.author.name))\n\n @pig.command(name=\"reject\")\n @logger(\"pig-math\")\n async def pig_reject(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.reject_challenge(ctx.message.author.name))\n\n @pig.command(name=\"roll\")\n @logger(\"pig-math\")\n async def pig_roll(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"roll\"))\n\n @pig.command(name=\"bank\")\n @logger(\"pig-math\")\n async def pig_bank(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"bank\"))\n\n @pig.command(name=\"score\")\n @logger(\"pig-math\")\n async def pig_score(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"score\"))\n\n @pig.command(name=\"quit\")\n @logger(\"pig-math\")\n async def pig_quit(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"quit\"))\n\n @self.command(name=\"save\")\n @logger(\"modonly\")\n async def save(ctx, *args):\n self.save_data_files()\n await ctx.message.channel.send(\"Saved.\")\n\n @self.command(name=\"balance\")\n @logger(\"all\")\n async def balance(ctx, *args):\n bals = self.data[\"balances.json\"]\n user = ctx.message.author.id\n bal = 0\n if user in 
bals:\n bal = bals[user]\n else:\n bals[user] = 0 \n await ctx.message.channel.send(ctx.message.author.name+\", your balance is \"+str(bal)+\".\")\n\n @self.command(name=\"credit\")\n @logger(\"modonly\")\n async def credit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit\n\n @self.command(name=\"debit\")\n @logger(\"modonly\")\n async def debit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n debit = 0\n for arg in args:\n try:\n debit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] -= debit\n else:\n bals[user.id] = -debit\n\n @self.command(name=\"register\")\n @logger(\"all\")\n async def register(ctx, *args):\n \"\"\"\n This command will trigger a check if the user is registered,\n if not, the bot will ask them to review the terms and conditions and accept,\n if they accept, the bot will consider them registered\n \"\"\"\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. :blue_heart:\")\n \n @self.command(name=\"accept\")\n @logger(\"all\")\n async def accept(ctx, *args):\n \"\"\"\n This command will trigger a check if the user has asked to be registered.\n If they have, then calling this triggers adding them to registered users.\n If they have not, they will be asked to type .register first.\n \"\"\"\n user = ctx.message.author\n user_mention = \"<@\"+str(user.id)+\">\"\n\n if user in self.data[\"users_asked_to_be_registered.json\"]:\n self.data[\"users.json\"].append(user)\n self.data[\"users_asked_to_be_registered.json\"].remove(user)\n await ctx.message.channel.send(user_mention+\", you have been successfully registered. :blue_heart:\")\n else:\n await ctx.message.channel.send(user_mention+\", have not commanded .register yet. \"\n \"Please do so first. :blue_heart:\")",
"def execute(self):\n\n options, args = self.parser.parse_args(self.argv)\n\n try:\n subcommand_name = self.argv[1]\n except IndexError:\n subcommand_name = 'help'\n\n if subcommand_name == 'help':\n if len(args) <= 2:\n self.print_help()\n else:\n self.fetch_subcommand(self.argv[2]).print_help()\n elif subcommand_name == 'version':\n self.print_version()\n else:\n self.fetch_subcommand(subcommand_name).execute()",
"def execute(self):\n self.process = subprocess.Popen(self.command_text_list)\n self.process.wait()",
"def _oc_command(self, args):\n oc_command_exists()\n return [\"oc\"] + args",
"async def admin(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")",
"def execute(commands, output = None, encoding = None, inactivityDuration = None, thread=False):\n\tprocess = CommandLauncher()\n\treturn process.executeMany(commands, output, encoding, inactivityDuration, thread)",
"def execute_frontend(self, cmd, verbose=True):\n return self.arangosh.run_command(cmd, verbose)",
"def test_handle_multiple_subcommands(self):\r\n ret, code = self.testcommand.handle(\"project list edit\", user)\r\n self.assertEqual(ret, self.testcommand.get_help())\r\n self.assertEqual(code, 200)",
"async def _admin(self, ctx: commands.Context):\n await ctx.send_help()",
"def run_commands(self, commands):\n if isinstance(commands, str):\n commands = commands.split('\\n')\n for item in commands:\n if item.strip().startswith(\"#\"):\n continue\n should_exit, result = self.cmd_with_result(item)\n if not result:\n break",
"def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)"
]
| [
"0.65307754",
"0.64596766",
"0.6451439",
"0.642095",
"0.63148993",
"0.6174462",
"0.6088206",
"0.60869735",
"0.60494465",
"0.6002248",
"0.5898545",
"0.58760387",
"0.58524823",
"0.58524823",
"0.58524823",
"0.58524823",
"0.5837719",
"0.5812344",
"0.58026713",
"0.57729846",
"0.57274586",
"0.57052624",
"0.5700645",
"0.5697679",
"0.5696262",
"0.5693879",
"0.56722057",
"0.56680435",
"0.56118923",
"0.5605404"
]
| 0.77418923 | 0 |
Get formatting options for OpenIO CLIs, to make them output the specified fields in the specified format. | def get_format_opts(cls, format_="value", fields=[]):
return " -f {0} {1}".format(format_, " ".join(["-c " + it for it in fields])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getopt_format(self):\n self._print_enum_opt(\"format\", FORMATTERS)",
"def fp_config_formatting(info):\n # From: OpenCL/AMDAPPSDK-3.0/include/CL/cl.h\n options = [((1 << 0), 'CL_FP_DENORM'),\n ((1 << 1), 'CL_FP_INF_NAN'),\n ((1 << 2), 'CL_FP_ROUND_TO_NEAREST'),\n ((1 << 3), 'CL_FP_ROUND_TO_ZERO'),\n ((1 << 4), 'CL_FP_ROUND_TO_INF'),\n ((1 << 5), 'CL_FP_FMA'),\n ((1 << 6), 'CL_FP_SOFT_FLOAT'),\n ((1 << 7), 'CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT')]\n # The initial line shows the bitmap, following lines\n # explicitly show the meaning and availability.\n option_breakdown = [bin(info)]\n for bitfield, option in options:\n is_available = bool(bitfield & info)\n option_breakdown.append('{}={}'.format(option, is_available))\n return ('\\n\\t'+' '*(device_maxwidth+3)).join(option_breakdown)",
"def _parse_format(mode=2, rc_kw=None, **kwargs):\n kw = {}\n rc_kw = rc_kw or {}\n for key, value in kwargs.items():\n key_fixed = _rc_nodots.get(key, None)\n if key_fixed is None:\n kw[key] = value\n else:\n rc_kw[key_fixed] = value\n return rc_kw, mode, kw",
"def asformat(self, format):",
"def format_cl_args(in_csv, out_csv, verbose=False, **kwargs):\n keymap = {\n \"alpha\": \"a\",\n \"frame_interval\": \"t\",\n \"m\": \"m\",\n \"metropolis_sigma\": \"s\",\n \"B\": \"z\",\n \"n_iter\": \"n\",\n \"burnin\": \"b\",\n \"min_log_D\": \"c\",\n \"max_log_D\": \"d\",\n \"seed\": \"e\",\n \"max_occ_weight\": \"x\",\n \"loc_error\": \"l\",\n \"bias_csv\": \"i\"\n }\n executable = \"gs_dp_diff_defoc\" if kwargs.get( \\\n \"incorp_defoc_likelihoods\", False) else \"gs_dp_diff\"\n optstr = \" \".join([\"-{} {}\".format(str(keymap.get(k)), str(kwargs.get(k))) \\\n for k in kwargs.keys() if k in keymap.keys()])\n if verbose:\n return \"{} {} -v {} {}\".format(executable, optstr, in_csv, out_csv)\n else:\n return \"{} {} {} {}\".format(executable, optstr, in_csv, out_csv)",
"def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]",
"def format_options(self, ctx: Context, formatter: DocsCommandHelpTextFormatter): # type:ignore\n DocsBaseCommand.format_description(formatter)\n self.format_sub_commands(formatter)",
"def get_format(self):\n pass",
"def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''",
"def formats():\n return _FORMATS",
"def __format__(self, format_spec):\n if format_spec == \"polite\":\n return self.polite\n elif format_spec == \"casual\":\n return self.casual\n else:\n # Using string addition here to avoid triggering flake8-sfs\n # while still giving a meaningful self-contained example:\n raise ValueError(format_spec + \" not a format defined by Client object\")",
"def doconce2format(name, format, options=''):\n if name.endswith('.do.txt'):\n name = name.replace('.do.txt', '')\n\n # Compile source\n cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()\n system(cmd)",
"def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED",
"def format(args):\n sequential_choices = (\"replace\", \"prefix\", \"suffix\")\n p = OptionParser(format.__doc__)\n p.add_option(\n \"--pairs\",\n default=False,\n action=\"store_true\",\n help=\"Add trailing /1 and /2 for interleaved pairs\",\n )\n p.add_option(\n \"--sequential\",\n default=None,\n choices=sequential_choices,\n help=\"Add sequential IDs\",\n )\n p.add_option(\n \"--sequentialoffset\", default=0, type=\"int\", help=\"Sequential IDs start at\"\n )\n p.add_option(\n \"--pad0\", default=0, type=\"int\", help=\"Pad a few zeros in front of sequential\"\n )\n p.add_option(\n \"--gb\",\n default=False,\n action=\"store_true\",\n help=\"For Genbank ID, get the accession\",\n )\n p.add_option(\"--sep\", default=None, help=\"Split description by certain symbol\")\n p.add_option(\n \"--index\",\n default=0,\n type=\"int\",\n help=\"Extract i-th field after split with --sep\",\n )\n p.add_option(\n \"--noversion\",\n default=False,\n action=\"store_true\",\n help=\"Remove the gb trailing version\",\n )\n p.add_option(\"--prefix\", help=\"Prepend prefix to sequence ID\")\n p.add_option(\"--suffix\", help=\"Append suffix to sequence ID\")\n p.add_option(\n \"--template\",\n default=False,\n action=\"store_true\",\n help=\"Extract `template=aaa dir=x library=m` to `m-aaa/x`\",\n )\n p.add_option(\"--switch\", help=\"Switch ID from two-column file\")\n p.add_option(\n \"--annotation\",\n help=\"Add functional annotation from two-column file ('ID <--> Annotation')\",\n )\n p.add_option(\"--ids\", help=\"Generate ID conversion table\")\n p.add_option(\n \"--upper\",\n default=False,\n action=\"store_true\",\n help=\"Convert sequence to upper case\",\n )\n p.add_option(\n \"--nodesc\",\n default=False,\n action=\"store_true\",\n help=\"Remove description after identifier\",\n )\n p.add_option(\n \"--minlength\", default=0, type=\"int\", help=\"Minimum sequence length to keep\"\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n infasta, outfasta = args\n gb = opts.gb\n pairs = opts.pairs\n prefix = opts.prefix\n suffix = opts.suffix\n noversion = opts.noversion\n sequential = opts.sequential\n sequentialoffset = opts.sequentialoffset\n sep = opts.sep\n idx = opts.index\n mapfile = opts.switch\n annotfile = opts.annotation\n desc = not opts.nodesc\n idsfile = opts.ids\n idsfile = open(idsfile, \"w\") if idsfile else None\n upper = opts.upper\n minlength = opts.minlength\n\n if mapfile:\n mapping = DictFile(mapfile, delimiter=\"\\t\")\n if annotfile:\n annotation = DictFile(annotfile, delimiter=\"\\t\")\n\n fp = SeqIO.parse(must_open(infasta), \"fasta\")\n fw = must_open(outfasta, \"w\")\n nremoved = 0\n for i, rec in enumerate(fp):\n if len(rec) < minlength:\n nremoved += 1\n continue\n origid = rec.id\n description = rec.description.replace(origid, \"\").strip()\n if sep:\n rec.id = rec.description.split(sep)[idx].strip()\n if gb:\n # gi|262233616|gb|GU123895.1| Coffea arabica clone BAC\n atoms = rec.id.split(\"|\")\n if len(atoms) >= 3:\n rec.id = atoms[3]\n elif len(atoms) == 2:\n rec.id = atoms[1]\n if pairs:\n id = \"/1\" if (i % 2 == 0) else \"/2\"\n rec.id += id\n if noversion:\n rec.id = rec.id.rsplit(\".\", 1)[0]\n if sequential:\n rec.id = \"{0:0{1}d}\".format(sequentialoffset, opts.pad0)\n if sequential == \"prefix\":\n rec.id = \"{0}-{1}\".format(rec.id, origid)\n elif sequential == \"suffix\":\n rec.id = \"{0}-{1}\".format(origid, rec.id)\n sequentialoffset += 1\n if opts.template:\n template, dir, lib = [\n x.split(\"=\")[-1] for x 
in rec.description.split()[1:4]\n ]\n rec.id = \"{0}-{1}/{2}\".format(lib, template, dir)\n if mapfile:\n if origid in mapping:\n rec.id = mapping[origid]\n else:\n logging.error(\n \"{0} not found in `{1}`. ID unchanged.\".format(origid, mapfile)\n )\n if prefix:\n rec.id = prefix + rec.id\n if suffix:\n rec.id += suffix\n if annotfile:\n rec.description = (\n annotation.get(origid, \"\")\n if not mapfile\n else annotation.get(rec.id, \"\")\n )\n else:\n rec.description = description if desc else \"\"\n if idsfile:\n print(\"\\t\".join((origid, rec.id)), file=idsfile)\n if upper:\n rec.seq = rec.seq.upper()\n\n SeqIO.write(rec, fw, \"fasta\")\n\n if idsfile:\n logging.debug(\"Conversion table written to `{0}`.\".format(idsfile.name))\n idsfile.close()\n\n if nremoved:\n logging.debug(\n \"Removed {} sequences with length < {}\".format(nremoved, minlength)\n )",
"def format_option(self, options_int, value):\n # NOPs\n if options_int == 1:\n return (self.scapy_options[options_int], ())\n elif options_int in [5]:\n return (self.scapy_options[options_int], value)\n # Timestamp\n elif options_int in [8, 14]:\n return (self.scapy_options[options_int], (value, 0))\n elif options_int in self.scapy_options:\n return (self.scapy_options[options_int], value)\n else:\n return (options_int, value)",
"def build_format(i, ex, args, meta_args):\n formatter = string.Formatter()\n format_string = meta_args.format_string\n fields = list(formatter.parse(format_string))\n\n kwarg_fields = []\n indexed_fields = []\n\n i.result = hive.variable('str')\n i.result_out = hive.pull_out(i.result)\n\n for index, field in enumerate(fields):\n literal_text = field[1]\n\n if literal_text is None:\n continue\n\n if not literal_text.isidentifier():\n field_name = \"field_{}\".format(index)\n indexed_fields.append(field_name)\n\n else:\n field_name = literal_text\n kwarg_fields.append(field_name)\n\n # Create IO\n attr = hive.variable()\n setattr(i, field_name, attr)\n\n in_attr = hive.pull_in(attr)\n setattr(i, \"{}_in\".format(field_name), in_attr)\n\n setattr(ex, field_name, hive.antenna(in_attr))\n hive.trigger(i.result_out, in_attr, pretrigger=True)\n\n ex.result = hive.output(i.result_out)\n\n def do_format(self):\n args = [getattr(self, \"_{}\".format(attr_name)) for attr_name in indexed_fields]\n kwargs = {attr_name: getattr(self, \"_{}\".format(attr_name)) for attr_name in kwarg_fields}\n self._result = formatter.format(format_string, *args, **kwargs)\n\n i.func = hive.modifier(do_format)\n hive.trigger(i.result_out, i.func, pretrigger=True)",
"def _format_getters(self, format_get_info=None, format_get_k_info=None):\n ## Get info setting\n if format_get_k_info is None:\n self.get_k = self._general_get_k\n elif format_get_k_info == \"default\":\n self.get_k = self._default_get_k\n elif format_get_k_info == \"general\":\n self.get_k = self._general_get_k\n elif format_get_k_info == \"list\":\n self.get_k = self._list_get_k\n elif format_get_k_info == \"integer\":\n self.get_k = self._integer_get_k\n ## Get information setting\n if format_get_info is None:\n self.get_information = self._general_get_information\n elif format_get_info == \"default\":\n self.get_information = self._default_get_information\n elif format_get_info == \"general\":\n self.get_information = self._general_get_information\n ## Other getters\n if self.staticneighs:\n self.get_copy_iss = self._staticneighs_get_copy_iss\n self.get_copy_iss_by_ind = self._staticneighs_get_copy_iss_by_ind\n else:\n self.get_copy_iss = self._notstaticneighs_get_copy_iss\n self.get_copy_iss_by_ind =\\\n self._notstaticneighs_get_copy_iss_by_ind",
"def format(self):\n return self[\"format\"]",
"def format(self):\n return self[\"format\"]",
"def getFormat(self, **kargs):\n #########################################################################\n # REPARSE INPUT ARGUMENTS\n #########################################################################\n for k, v in self.argTypes.iteritems():\n if kargs.has_key(k) and isinstance(kargs[k],v):\n exec('self.'+k+' = kargs[k]')\n\n #########################################################################\n # ROW FORMAT\n #########################################################################\n\n # Generate default values for essential arguments if they are not passed.\n if not self.rowformat:\n rowlen = (len(self.data[0]) + 1) if self.vheader else len(self.data[0])\n self.rowformat = \"c \" * rowlen\n else:\n if self.vheader:\n addstr = \"c \" if self.rowformat.find(\"|\") == -1 else \"|c\"\n self.rowformat = addstr + self.rowformat\n if self.nospaces:\n self.rowformat = r'@{\\extracolsep{\\fill}}'+ self.rowformat\n\n\n #########################################################################\n # CELL FORMAT\n #########################################################################\n\n # If no print format for cells given, guess from data\n if not self.cellformat:\n self.cellformat = list()\n for i in range(len(self.data_numpy.dtype)):\n dtype = self.data_numpy.dtype[i]\n if dtype.kind == 'S':\n self.cellformat.append('%'+str(dtype.itemsize)+'s')\n elif dtype.kind == 'i':\n self.cellformat.append('%'+str(dtype.itemsize)+'d')\n elif dtype.kind == 'f':\n digits = str(dtype.itemsize/2)+'.'+str(dtype.itemsize/2)\n self.cellformat.append('%'+digits+'f')\n elif dtype.kind == 'c':\n return None\n\n # Text string for printing data rows\n self.cellstring = str()\n for cell in self.cellformat:\n self.cellstring += cell + \" & \"\n self.cellstring = self.cellstring[:-2] + \"\\\\\\\\\\n\"",
"def _get_options(self, struct, field):\n return struct.DESCRIPTOR.fields_by_name[field].GetOptions() if hasattr(struct, \"DESCRIPTOR\") else None",
"def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)",
"def format(self):\n ...",
"def formatDefinitions(options, COLS, presets={}):\n\n # Number of spaces before each line\n # Default to 10, but reduce to 1 if this results in very low width\n spaces = \" \" * 10\n width = COLS - 11\n if width < 15:\n width = COLS - 2\n spaces = \" \"\n\n lines = []\n # Display flag name, followed by indented documentation string\n for (longname, default, doc) in options:\n lines.append(\"--{} <arg>\".format(longname))\n default = presets.get(longname, default)\n\n # Don't add default info for empty strings or None\n if default not in ('', None):\n doc += ' (defaults to {})'.format(default)\n\n # Word wrap documentation string\n while len(doc) > width:\n pre, _, post = doc[:width].rpartition(' ')\n doc = post + doc[width:]\n lines.append(spaces + pre)\n if doc:\n lines.append(spaces + doc)\n\n lines.append('')\n return '\\n'.join(lines)",
"def __format__(self, format_specification=''):\n return super().__format__(format_specification=format_specification)",
"def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")",
"def format(self, out):\n import cly.console as console\n\n if not self.help:\n return\n last_group = None\n max_len = max([len(h[2]) for h in self.help])\n if out.isatty():\n write = console.colour_cwrite\n else:\n write = console.mono_cwrite\n for group, order, command, help in self.help:\n if last_group is not None and last_group != group:\n out.write('\\n')\n last_group = group\n write(out, ' ^B%-*s^B %s\\n' % (max_len, command, help))",
"def _wr_3fmt_goeaobj(goea_results, goeaobj, wr_params, log):\n # List of all fields, printable or not, available from GOEnrichmentRecord\n log.write(\"\\nGOEnrichmentRecord FIELDS: {F}\\n\".format(F=\" \".join(goea_results[0].get_prtflds_all())))\n # Use the subset of namedtuple fields_names that are listed in the format string:\n # Same format: print to screen and print to file:\n goeaobj.prt_txt(log, goea_results, **wr_params) # Print to screen\n goeaobj.wr_txt(\"nbt3102_subset_obj.txt\", goea_results, **wr_params)\n # Print to Excel Spreadsheet\n title=\"Print subset of fields from GOEnrichmentRecord\"\n goeaobj.wr_xlsx(\"nbt3102_subset_obj.xlsx\", goea_results, title=title, **wr_params)\n # Print to tab-separated file\n goeaobj.wr_tsv(\"nbt3102_subset_obj.tsv\", goea_results, **wr_params)",
"def format(self):\n return self.getparam(\"FORMAT\")",
"def format(self):\n return self.getparam(\"FORMAT\")"
]
| [
"0.64548236",
"0.58642226",
"0.5668938",
"0.5616641",
"0.56150675",
"0.5609857",
"0.55871737",
"0.5584162",
"0.55806154",
"0.5536493",
"0.553237",
"0.55031854",
"0.54341996",
"0.53965104",
"0.5395915",
"0.53318113",
"0.52923864",
"0.5283675",
"0.5283675",
"0.52736145",
"0.5265001",
"0.5248579",
"0.5194801",
"0.51753134",
"0.5164281",
"0.5148787",
"0.51368046",
"0.51125896",
"0.51125556",
"0.51125556"
]
| 0.7621838 | 0 |
Gets the horizon angles given a terrain profile. Derived from ITM hzns() routine as specified in R2SGN21. | def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):
num_points = int(its_elev[0])
step = its_elev[1]
dist = num_points * step
# Find the refractivity at the average terrain height
start_avg = int(3.0 + 0.1 * num_points)
end_avg = num_points - start_avg + 6
zsys = np.mean(its_elev[start_avg-1:end_avg])
refractivity *= np.exp(-zsys/9460.0)
# Find the ray down-curvature per meter
gma = 157e-9
gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))
alt_cbsd = its_elev[2] + height_cbsd
alt_rx = its_elev[num_points+2] + height_rx
qc = 0.5 * gme
q = qc * dist
# theta0 and theta1 the slopes, dl0 and dl1 the horizon distances
theta1 = (alt_rx - alt_cbsd) / dist
theta0 = theta1 - q
theta1 = -theta1 - q
dl0 = dist
dl1 = dist
if num_points >= 2:
sa = 0.0
sb = dist
wq = True
for i in range(1, num_points):
sa += step
sb -= step
q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd
if q > 0.0:
theta0 += q/sa
dl0 = sa
wq = False
if not wq:
q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx
if q > 0.0:
theta1 += q/sb
dl1 = sb
return (np.arctan(theta0) * 180/np.pi,
np.arctan(theta1) * 180/np.pi,
dl0,
dl1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def horiz_angle(time, data):\n\n # TODO What should 0deg be? Set it to inline w/ target? facing target?\n\n # direction of the sun. measured in degrees counted clockwise from north.\n azimuth = data[time]['azimuth']\n\n h_angle = (azimuth / 2 - 90)\n\n # returns answer between -180 and 180 degrees\n return round(((h_angle + 180) % 360) - 180, 4)",
"def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)",
"def addHorizon(horizon_altitude=np.radians(30.), lat_telescope=np.radians(33.35731944), raCen=0.):\n step = .02\n az = np.arange(0, np.pi * 2.0 + step, step)\n alt = np.ones(len(az), float) * horizon_altitude\n obs = ephem.Observer()\n obs.lat = lat_telescope\n # Set obs lon to zero, just to fix the location.\n # Note that this is not the true observatory longitude, but as long as\n # we calculate the RA at zenith for this longitude, we can still calculate\n # HA appropriately.\n obs.lon = 0\n obs.pressure = 0\n # Given obs lon at zero, find the equivalent ra overhead.\n zenithra, zenithlat = obs.radec_of(0, 90)\n lon = np.zeros(len(az), float)\n lat = np.zeros(len(az), float)\n for i, (alti, azi) in enumerate(zip(alt, az)):\n # Find the equivalent ra/dec values for an alt/az circle.\n r, lat[i] = obs.radec_of(azi, alti)\n # Correct the ra value by the zenith ra value, to get the HA.\n lon[i] = r - zenithra\n lon = -(lon - np.pi) % (np.pi * 2) - np.pi\n return lon, lat",
"def getHeadAngles(self):\n\n\t\trobot_head_yaw, robot_head_pitch = self.motion.getAngles(\"Head\", False)\n\n\t\t# return adjusted robot head angles\n\t\treturn [robot_head_yaw, -robot_head_pitch]",
"def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))",
"def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];",
"def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l",
"def extent_hhr(self):\r\n\r\n return [self._h_min-0.5*self.step, self._h_max+0.5*self.step,\r\n self._hr_min-0.5*self.step, self._hr_max+0.5*self.step]",
"def h(self, landmark):\r\n dx = landmark[0,0] - (self.pose[0, 0] +\r\n cam_displacement * math.cos(self.pose[2, 0]))\r\n dy = landmark[1,0] - (self.pose[1, 0] +\r\n cam_displacement * math.sin(self.pose[2, 0]))\r\n r = math.sqrt(dx ** 2 + dy ** 2)\r\n angle = math.atan2(dy, dx) - self.pose[2, 0]\r\n alpha = pi_2_pi(angle)\r\n return np.array([r, alpha])",
"def get_ra_dec_from_skymap(skymap):\n index_of_max = np.argmax(skymap)\n nside = hp.npix2nside(len(skymap))\n theta, phi = hp.pix2ang(nside, index_of_max)\n return phi, np.pi/2-theta",
"def getH(self):\n\t\thAngle = (math.atan2(self.y,self.x))/(2*math.pi)\n\t\tif self.y < 0:\n\t\t\thAngle = 1 + hAngle\t\n\t\treturn hAngle",
"def hpa(self):\n return HPAngle(gon2hp(self.gon_angle))",
"def angle(z):",
"def looks_azimuth(self) -> Optional[int]:\n return self._get_property(LOOKS_AZIMUTH_PROP, int)",
"def get_azimuth(self):\n self.degrees = self.azimuth_encoder.get_degrees()\n self.tele_azimuth = self.Calculations.convert_degrees(self.degrees)\n return self.tele_azimuth",
"def getAltitudeProfile(pass_length,terrain,uav_altitude,u,start_v,wind_angle):\n altitude_profile = []\n v = start_v\n for k in range(0,round(pass_length)):\n coord = convertCoords([[u,v]],wind_angle,'xy')\n x = coord[0][0]\n y = coord[0][1]\n x_points = [int(x),int(x),int(x)+1,int(x)+1]\n y_points = [int(y),int(y)+1,int(y)+1,int(y)]\n z_points = [terrain[int(y)][int(x)],terrain[int(y)+1][int(x)],\n terrain[int(y)+1][int(x)+1],terrain[int(y)][int(x)+1]]\n\n # For created terrain ONLY\n z = griddata((x_points,y_points),z_points,(x,y)) # Interpolate \n altitude = z + uav_altitude\n\n altitude_profile.append(altitude)\n v +=1\n return altitude_profile",
"def _rotate_winds(rpn_hr):\n coords = {\"lon\": rpn_hr.nav_lon, \"lat\": rpn_hr.nav_lat}\n u_out, v_out = viz_tools.rotate_vel_bybearing(\n rpn_hr.UU, rpn_hr.VV, coords, origin=\"grid\"\n )\n\n return u_out, v_out",
"def get_ztf_footprint_corners():\n x = 6.86 / 2\n return [-x, +x, +x, -x] * u.deg, [-x, -x, +x, +x] * u.deg",
"def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec",
"def Horizon(time, observer, ra, dec, refraction):\n if not (Refraction.Airless.value <= refraction.value <= Refraction.JplHorizons.value):\n raise Error('Invalid refraction type')\n\n latrad = math.radians(observer.latitude)\n lonrad = math.radians(observer.longitude)\n decrad = math.radians(dec)\n rarad = ra * _HOUR2RAD\n\n sinlat = math.sin(latrad)\n coslat = math.cos(latrad)\n sinlon = math.sin(lonrad)\n coslon = math.cos(lonrad)\n sindc = math.sin(decrad)\n cosdc = math.cos(decrad)\n sinra = math.sin(rarad)\n cosra = math.cos(rarad)\n\n # Calculate three mutually perpendicular unit vectors\n # in equatorial coordinates: uze, une, uwe.\n #\n # uze = The direction of the observer's local zenith (straight up).\n # une = The direction toward due north on the observer's horizon.\n # uwe = The direction toward due west on the observer's horizon.\n #\n # HOWEVER, these are uncorrected for the Earth's rotation due to the time of day.\n #\n # The components of these 3 vectors are as follows:\n # [0] = x = direction from center of Earth toward 0 degrees longitude (the prime meridian) on equator.\n # [1] = y = direction from center of Earth toward 90 degrees west longitude on equator.\n # [2] = z = direction from center of Earth toward the north pole.\n\n uze = [coslat*coslon, coslat*sinlon, sinlat]\n une = [-sinlat*coslon, -sinlat*sinlon, coslat]\n uwe = [sinlon, -coslon, 0.0]\n\n # Correct the vectors uze, une, uwe for the Earth's rotation by calculating\n # sideral time. Call spin() for each uncorrected vector to rotate about\n # the Earth's axis to yield corrected unit vectors uz, un, uw.\n # Multiply sidereal hours by -15 to convert to degrees and flip eastward\n # rotation of the Earth to westward apparent movement of objects with time.\n\n angle = -15.0 * _sidereal_time(time)\n uz = _spin(angle, uze)\n un = _spin(angle, une)\n uw = _spin(angle, uwe)\n\n # Convert angular equatorial coordinates (RA, DEC) to\n # cartesian equatorial coordinates in 'p', using the\n # same orientation system as uze, une, uwe.\n\n p = [cosdc*cosra, cosdc*sinra, sindc]\n\n # Use dot products of p with the zenith, north, and west\n # vectors to obtain the cartesian coordinates of the body in\n # the observer's horizontal orientation system.\n #\n # pz = zenith component [-1, +1]\n # pn = north component [-1, +1]\n # pw = west component [-1, +1]\n\n pz = p[0]*uz[0] + p[1]*uz[1] + p[2]*uz[2]\n pn = p[0]*un[0] + p[1]*un[1] + p[2]*un[2]\n pw = p[0]*uw[0] + p[1]*uw[1] + p[2]*uw[2]\n\n # proj is the \"shadow\" of the body vector along the observer's flat ground.\n proj = math.sqrt(pn*pn + pw*pw)\n\n # Calculate az = azimuth (compass direction clockwise from East.)\n if proj > 0.0:\n # If the body is not exactly straight up/down, it has an azimuth.\n # Invert the angle to produce degrees eastward from north.\n az = math.degrees(-math.atan2(pw, pn))\n if az < 0:\n az += 360\n else:\n # The body is straight up/down, so it does not have an azimuth.\n # Report an arbitrary but reasonable value.\n az = 0.0\n\n # zd = the angle of the body away from the observer's zenith.\n zd = math.degrees(math.atan2(proj, pz))\n hor_ra = ra\n hor_dec = dec\n\n if refraction != Refraction.Airless:\n zd0 = zd\n refr = RefractionAngle(refraction, 90.0 - zd)\n zd -= refr\n if refr > 0.0 and zd > 3.0e-4:\n zdrad = math.radians(zd)\n sinzd = math.sin(zdrad)\n coszd = math.cos(zdrad)\n zd0rad = math.radians(zd0)\n sinzd0 = math.sin(zd0rad)\n coszd0 = math.cos(zd0rad)\n\n pr = [(((p[j] - coszd0 * uz[j]) / sinzd0)*sinzd + uz[j]*coszd) for j in 
range(3)]\n proj = math.sqrt(pr[0]*pr[0] + pr[1]*pr[1])\n if proj > 0:\n hor_ra = _RAD2HOUR * math.atan2(pr[1], pr[0])\n if hor_ra < 0:\n hor_ra += 24\n else:\n hor_ra = 0\n hor_dec = math.degrees(math.atan2(pr[2], proj))\n\n return HorizontalCoordinates(az, 90.0 - zd, hor_ra, hor_dec)",
"def heading_idx(self):\n if self.heading > 0:\n idx = self.heading * 180\n else:\n idx = 360 + self.heading * 180\n return int(idx - 1)",
"def get_angles(self):\n if self.wavelength_control:\n return self.gonio_angles + self.wl_angles\n else:\n return self.gonio_angles",
"def determine_rotation_angle(self, landmarks):\n lp = landmarks['left-eye-center-pos']\n rp = landmarks['right-eye-center-pos']\n return angle_between_points(lp, rp)",
"def z_halo(self): \n return self.coords_halo[2]",
"def rh(self, h):\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder",
"def getPosHeading(self) :\n\t\treturn (self.avatarNP.getX(), self.avatarNP.getY(), \\\n\t\t\tself.avatarNP.getZ(), (self.avatarNP.getHpr()[0])%360)",
"def get_metrics(H):\n theta = np.arctan2(H[0,1], H[0,0])\n scale = H[0,0] / np.cos(theta)\n tx = H[0,2]\n ty = H[1,2]\t\n return tx,ty,theta",
"def calculate_yaw(pixel_x, center_x) -> float:\n yaw = math.degrees(math.atan((pixel_x - center_x) / H_FOCAL_LENGTH))\n return yaw",
"def get_shoulder_orientation(self):\n FL_shoulder_orientation = p.getLinkState(bodyUniqueId=self.rex.quadruped, \n linkIndex=self.link_name_to_ID['front_left_shoulder_link'])[-1]\n FR_shoulder_orientation = p.getLinkState(bodyUniqueId=self.rex.quadruped, \n linkIndex=self.link_name_to_ID['front_right_shoulder_link'])[-1]\n RL_shoulder_orientation = p.getLinkState(bodyUniqueId=self.rex.quadruped, \n linkIndex=self.link_name_to_ID['rear_left_shoulder_link'])[-1]\n RR_shoulder_orientation = p.getLinkState(bodyUniqueId=self.rex.quadruped, \n linkIndex=self.link_name_to_ID['rear_right_shoulder_link'])[-1]\n\n return [FL_shoulder_orientation, FR_shoulder_orientation, RL_shoulder_orientation, RR_shoulder_orientation]",
"def get_local_hour_angle(self):\n LHA = self.Calculations.local_hour_angle(self.Longitude, self.right_ascension)\n self.LHA = LHA\n return LHA"
]
| [
"0.5809133",
"0.56932247",
"0.562133",
"0.5432792",
"0.5391699",
"0.5273448",
"0.5269917",
"0.5258349",
"0.5191962",
"0.5191381",
"0.51645434",
"0.51499426",
"0.5090944",
"0.5034365",
"0.5031973",
"0.50225246",
"0.501417",
"0.4997427",
"0.49757305",
"0.4935782",
"0.49198472",
"0.49158698",
"0.4912527",
"0.49086532",
"0.48902053",
"0.4889565",
"0.48869053",
"0.48804808",
"0.48717973",
"0.4870201"
]
| 0.633702 | 0 |
Repeats a message multiple times. | async def repeat(ctx, times: int, content='repeating...'):
for i in range(times):
await ctx.send(content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)",
"async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)",
"async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)",
"def async_repetitive_message(message, interval_seconds):\n repeat = ['-', '\\\\', '|', '/']\n\n for switch in itertools.cycle(repeat):\n print('\\r[{}] {}'.format(switch, message), end='')\n yield from async_sleep(interval_seconds)",
"async def repeat(self, ctx, *, text):\n await ctx.send(text)",
"async def ripgupta(self, ctx, count, *, message):\n int(count)\n gupta = 468209010978455552\n channel = 617525238392946699\n mloop = 0\n int(mloop) \n while mloop > count:\n await channel.send(\"{} {}\".format(gupta.mention, message))\n int(mloop)\n mloop = mloop + 1",
"async def repeat(ctx, *, arg):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('repeat: ' + arg, extra={'invoker': ctx.message.author.name})\r\n await ctx.send(arg)",
"async def repeat(self, ctx, times : int, content : str):\n if times < 6:\n for i in range(times):\n await ctx.send(content)\n else:\n await ctx.send(\"Please don't get me banned by Discord! (Max 5)\")",
"async def do(ctx, times : int, *, command):\n msg = copy.copy(ctx.message)\n msg.content = command\n for i in range(times):\n await bot.process_commands(msg)",
"async def repeat(\n text: ('str', 'The content to repeat')\n):\n if not text:\n text = 'nothing to repeat'\n \n return InteractionResponse(text, allowed_mentions = None)",
"def cycle(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n players = self.player_queue.pop_all()\n players_str = ' '.join(players)\n channel = SOCKET_ARGS['channel']\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n self.player_queue_credentials = credential_str\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self.player_queue_credentials = None\n for player in players:\n self._add_to_whisper_queue(player, whisper_str)\n # self.command_queue.appendleft(('_delete_last_row', {}))\n self._add_to_chat_queue(\"Invites sent to: {} and there are {} people left in the queue\".format(\n players_str, len(self.player_queue.queue)))",
"def cycle_one(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n channel = SOCKET_ARGS['channel']\n try:\n player = self.player_queue.pop()\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n elif self.player_queue_credentials is not None:\n credential_str = self.player_queue_credentials\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self._add_to_whisper_queue(player, whisper_str)\n self._add_to_chat_queue(\"Invite sent to: {} and there are {} people left in the queue\".format(player, len(self.player_queue.queue)))\n # self.command_queue.appendleft(('_delete_last_row', {}))\n except IndexError:\n self._add_to_chat_queue('Sorry, there are no more players in the queue')",
"def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)",
"def repeat(word, repetitions):\n return word * repetitions",
"def repeat(self, count):\n return self.Sequence((self,) * count)",
"def repeat(s):\r\n\r\n return s",
"def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)",
"def repeat_string_n_times(string, count):\r\n return string * int(count)",
"async def echo(ctx, *, message=None):\n message = message or \"Please provide the message to be repeated.\"\n await ctx.message.delete()\n await ctx.send(message)",
"def repeat_timers(bot, chat_id, message_id):\n\n bot_collection[chat_id].timers.repeat()\n start_timer(bot, chat_id, message_id)",
"def repeat(self, repeat: bool=None):\n self._select_interface(self._rc_repeat, self._http_repeat, repeat)",
"async def repeat(self, msg):\n if msg.guild.id in self.player:\n if msg.voice_client.is_playing() is True:\n if self.player[msg.guild.id]['repeat'] is True:\n self.player[msg.guild.id]['repeat'] = False\n return await msg.message.add_reaction(emoji='✅')\n\n self.player[msg.guild.id]['repeat'] = True\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"No audio currently playing\")\n return await msg.send(\"Bot not in voice channel or playing music\")",
"def repeat(self):\n return self._repeat",
"def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)",
"def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)",
"def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:",
"def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)",
"def message_all(self, message):\n # We copy the _clients into a list to avoid dictionary changing\n # size during iteration.\n for character in self.players.values():\n character.message(message)",
"async def say(self, string, *, update=True):\r\n said = False\r\n while not said:\r\n if not self.ended:\r\n for x in range(4):\r\n try:\r\n msg = await bot.send_message(self.channel, string)\r\n said = True\r\n if update and self.player:\r\n self.player.update_message(string)\r\n return\r\n except (discord.HTTPException, OSError, aiohttp.ClientResponseError) as e:\r\n print(\"Suffered\", type(e), \"error in botcommand.say().\")\r\n print(\"info: \", string, self.channel.name, self.player.id)\r\n await asyncio.sleep(x ** x)\r\n self.end()\r\n raise CommandEndedError\r\n else:\r\n raise CommandEndedError",
"def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat"
]
| [
"0.7628505",
"0.75241643",
"0.74671906",
"0.6782859",
"0.6725025",
"0.66473454",
"0.6640616",
"0.6501877",
"0.6463185",
"0.6406237",
"0.62558305",
"0.6149078",
"0.61431366",
"0.61045724",
"0.60727173",
"0.60414445",
"0.6028678",
"0.59844077",
"0.59493715",
"0.59287435",
"0.57907003",
"0.5782056",
"0.5741135",
"0.5721196",
"0.56817126",
"0.5671292",
"0.56708336",
"0.5663998",
"0.5582047",
"0.5578785"
]
| 0.75642896 | 1 |
Says if a user is cool. In reality this just checks if a subcommand is being invoked. | async def cool(ctx):
if ctx.invoked_subcommand is None:
await ctx.send('No, {0.subcommand_passed} is not cool'.format(ctx)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def cool(ctx):\n if ctx.invoked_subcommand is None:\n await bot.say('No, {0.subcommand_passed} is not cool'.format(ctx))",
"async def event(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n await self.bot.say('No, {0.subcommand_passed} is not cool'.format(ctx))",
"async def appcheck(self, ctx: commands.Context, user_id: discord.Member):\n return await ctx.send(\n \"This command is currently being reworked, follow updates in The Kompound\"\n )",
"def can_cool(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"can_cool\"))\r\n return True",
"def is_rainbow(msg: str = 'I guess you are not my little pog champ :3'):\n\n async def check(ctx):\n rainbow = ctx.author.id == ctx.bot.owner_id\n if not rainbow:\n await ctx.send(msg)\n return rainbow\n\n return commands.check(check)",
"def run_check_username(args, username):\n if not args.quiet:\n colored_username = Style.BRIGHT + username + Style.RESET_ALL\n print \"Checking '{}'..\".format(colored_username),\n result = check_username(username)\n if not result.valid:\n if not args.quiet:\n print Fore.RED + \"invalid\" + Style.RESET_ALL\n elif result.available:\n if not args.quiet:\n print Fore.GREEN + \"available\" + Style.RESET_ALL\n else:\n if not args.quiet:\n print Fore.RED + \"taken\" + Style.RESET_ALL\n if args.verbose and result.recommendations:\n for suggestion in result.recommendations:\n print \"\\t\" + Fore.YELLOW + suggestion + Style.RESET_ALL\n return result.available",
"def __is_active(self, command):\n return True",
"def is_cooling(action_data):\n return (action_data == COOLING_ACTION) | (action_data == TWO_STAGE_COOLING_ACTION)",
"async def interaction_check(self, interaction: Interaction) -> bool:\n if interaction.user != self.interaction_owner:\n await interaction.response.send_message(\n \":x: This is not your command to react to!\",\n ephemeral=True\n )\n return False\n return True",
"async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True",
"async def cog_check(self, ctx: Context) -> bool: # type: ignore[override]\n\n return ctx.guild is not None",
"async def love(ctx, user: discord.Member):\r\n author = ctx.message.author\r\n if user.id == ctx.bot.user.id:\r\n await ctx.send(\"I am not capable of loving like you can. I'm sorry.\" )\r\n else:\r\n await ctx.send(author.mention + \" is capable of loving \" + user.mention + \" a whopping \" +\r\n str(randint(0, 100)) + \"%!\")\r\n ctx.counter(n)",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def cog_check(self, ctx):\n if ctx.guild is None:\n raise commands.NoPrivateMessage()\n return True",
"async def meow(self, ctx: vbu.Context):\n\n if ctx.invoked_subcommand is None:\n return await ctx.send_help(ctx.command)",
"async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950",
"async def cog_check(self, ctx):\n guild_doc = await db.PLUGINS.find_one({\"_id\": ctx.guild.id})\n\n if guild_doc.get(\"Verification\"):\n return True\n\n else:\n await ctx.send(\n embed=discord.Embed(\n description=(\n f\"{var.E_DISABLE} The Verification plugin\"\n \" is disabled in this server\"\n ),\n color=var.C_ORANGE\n )\n )",
"def canAct(self) -> bool:\n return self.cooldown < 1",
"async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")",
"def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"async def poke(ctx):\n await pokemaster_bot.change_presence(activity=Game(name='!help for command info'))\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")",
"async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True",
"def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id",
"def first_is_valid(command_from_user):\n arguement_entered_user = command_from_user[0]\n if arguement_entered_user == 'list':\n return True\n \n elif arguement_entered_user == 'clashes':\n return True\n \n else:\n return False",
"def check_if_help_message(message):\n return \"The commands are\" in message",
"def cmd(self, context, message):\r\n return True",
"async def _tweets(self, ctx):\n if ctx.invoked_subcommand is None:\n await self.bot.send_cmd_help(ctx)"
]
| [
"0.7579999",
"0.6899157",
"0.63918954",
"0.6365819",
"0.61112785",
"0.60582155",
"0.60375273",
"0.5930833",
"0.5887822",
"0.58628875",
"0.5848644",
"0.57999957",
"0.5797073",
"0.5797073",
"0.579523",
"0.57907087",
"0.56930506",
"0.5675957",
"0.564224",
"0.56418496",
"0.5619679",
"0.55580366",
"0.55580366",
"0.555265",
"0.5543906",
"0.55360746",
"0.55334127",
"0.5530377",
"0.5516296",
"0.55155915"
]
| 0.7729976 | 0 |
Is the bot cool? | async def _bot(ctx):
await ctx.send('Yes, the bot is cool.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_bot(self) -> bool:",
"def is_bot(self) -> undefined.UndefinedOr[bool]:",
"async def _bot():\n await bot.say('Yes, the bot is cool.')",
"def is_rainbow(msg: str = 'I guess you are not my little pog champ :3'):\n\n async def check(ctx):\n rainbow = ctx.author.id == ctx.bot.owner_id\n if not rainbow:\n await ctx.send(msg)\n return rainbow\n\n return commands.check(check)",
"async def best():\n await bot.say('Nargacuga is the best Monster. Are you casual?')",
"async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950",
"def can_cool(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"can_cool\"))\r\n return True",
"async def wherearemypants():\n await bot.say('justin is a known pants thief. Not saying he took them but he totally probably took them')",
"def is_bot(self):\n return self._is_bot",
"async def tick(self):\n room = self.bot.Room.load('19961884194@chatroom')\n await room.ready()\n await room.say(f'i love you -> {datetime.now()}')",
"async def wink(self, ctx):\n await ctx.send('wonk')",
"def ShouldI(sc, event):\n options = ['Yes, you should!',\n 'I think that would be best.',\n 'Hrmm... yes!',\n 'Signs point to yes!',\n 'That\\'s the best idea I\\'ve ever heard!',\n 'D\\'uh! Of course!',\n 'Wow! What a great idea!',\n 'What an incredible idea! You\\'re a genius!',\n 'Yes, yes! A thousand times, yes!',\n 'Of course you should!',\n 'I\\'ve never heard of a better idea!',\n 'Why didn\\'t I think of that? You\\'re brilliant!']\n response = random.choice(options)\n sc.api_call('chat.postMessage', as_user='true',\n channel=event['channel'], text=response)",
"def hey(self, msg):\n if issilence(msg):\n return \"Fine. Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"",
"async def cool(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('No, {0.subcommand_passed} is not cool'.format(ctx))",
"async def favor(self, ctx):\n east = ctx.guild.get_member(339119069066297355)\n if not east or east.status != discord.Status.online:\n await ctx.send(f\"I'm afraid I can't do that, {ctx.author.display_name}.\")\n return\n await ctx.send(\"&East, could I ask you for a favor? I need someone to verify my code.\")\n await asyncio.sleep(2)\n async with ctx.typing():\n await asyncio.sleep(1)\n await ctx.send(\"Oh my. Well, if you insist ;)\")",
"def isalive():\n return 'alive'",
"def wishMe():\n hour = int(datetime.datetime.now().hour)\n if 0 <= hour < 12:\n speak(\"Good Morning Boss\")\n\n elif 12 <= hour < 18:\n speak(\"Good Afternoon Boss\")\n\n else:\n speak(\"Good Evening Boss!\")\n\n speak(\"This is mayaa appointed as your Assistant\")",
"async def hi(self, ctx, *, extra=\"\"):\n if str(ctx.author) == \"East#4048\" and extra.startswith(\"there...\"):\n async with ctx.typing():\n await asyncio.sleep(1)\n await ctx.send(\"Hello East.\")\n await asyncio.sleep(1)\n async with ctx.typing():\n await asyncio.sleep(2)\n await ctx.send(\"Thank you. The same to you.\")\n return\n await ctx.send(f\"Hello there {ctx.author.name}\")",
"async def love(ctx, user: discord.Member):\r\n author = ctx.message.author\r\n if user.id == ctx.bot.user.id:\r\n await ctx.send(\"I am not capable of loving like you can. I'm sorry.\" )\r\n else:\r\n await ctx.send(author.mention + \" is capable of loving \" + user.mention + \" a whopping \" +\r\n str(randint(0, 100)) + \"%!\")\r\n ctx.counter(n)",
"async def hello(self, ctx):\n await ctx.send(random.choice(self.greetings))",
"def brain_status(self):\r\n return 'thinking...'",
"def command_who(self, bot, update):\n\n messages = [\n 'Myles Braithwaite lives in Toronto where he runs a small '\n 'consluting company called [Monkey in your Soul]'\n '(https://monkeyinyoursoul.com/) (you should hire him because '\n \"he's awesome).\",\n 'You should follow him on [Twitter](https://twitter.com/mylesb) '\n 'or [Instagram](https://instagram.com/myles).',\n 'You can find his programming stuff on [GitHub]'\n '(https://github.com/myles) or [CodePen]'\n '(http://codepen.io/mylesb/).'\n ]\n\n self.send_messages(bot, update, messages)",
"async def ping():\n await bot.say(\"Pong\")",
"def is_hero(self):\n return True",
"async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")",
"def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")",
"def talk(self):\n print('Meow!')",
"def is_lyrics_approved():",
"async def amireallyalive(alive):\n await alive.edit(\"**Crevil Is ALive!** \\n`🇮🇳BOT Status : ` **☣Hot**\\n\\n\"\n f\"`My Master`: {DEFAULTUSER}\\n\\n\"\n \"`Telethon version:` **6.0.9**\\n`Python:` **3.7.4**\\n\"\n \"`Database Status:` **conected...**\\n\\n`I am Working Fine! Sir\\n`\"\n \"**Bot Creator:** [CrevilBot](t.me/crevil)\\n\"\n \" [🇮🇳Deploy This crevilbot🇮🇳](https://github.com/crevils/crevilbot)\")",
"def is_for_me(event):\n # check if not my own event\n\n type = event.get('type')\n\n if type and type == 'message' and not(event.get('user') == VALET_SLACK_ID):\n\n if is_private(event):\n return True\n text = event.get('text')\n # channel = event.get('channel')\n if type and type == 'message' and text.startswith(\"@td \"):\n return True\n if type and type == 'message' and text.startswith(\"@t\"):\n return True\n if type and type == 'message' and text.startswith(\"@cl\"):\n return True\n if valet_slack_mention in text.strip().split():\n return True"
]
| [
"0.7927701",
"0.7085806",
"0.70779985",
"0.69509816",
"0.68449193",
"0.6735682",
"0.667686",
"0.6618449",
"0.645192",
"0.6344753",
"0.6315624",
"0.6279006",
"0.6266472",
"0.61349714",
"0.6125706",
"0.6121934",
"0.6118115",
"0.61062866",
"0.6083343",
"0.6079055",
"0.60745174",
"0.6071202",
"0.6054641",
"0.60484254",
"0.60441947",
"0.6022595",
"0.60110116",
"0.59955394",
"0.5994043",
"0.59857315"
]
| 0.7284892 | 1 |
Returns normalized explicit boolean flags for `absl.flags` compatibility. | def normalize_flags(argv: List[str]) -> List[str]:
  boolean_flag_pattern = re.compile(r'--[\w_]+=(true|false)')
def _normalize_flag(arg: str) -> str:
    if not boolean_flag_pattern.match(arg):
return arg
if arg.endswith('=true'):
return arg[: -len('=true')] # `--flag=true` -> `--flag`
elif arg.endswith('=false'):
# `--flag=false` -> `--noflag`
return '--no' + arg[len('--') : -len('=false')]
else:
raise AssertionError(f'Unrecognized arg: {arg}')
return [_normalize_flag(a) for a in argv] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _SanitizeFlags( flags ):\n\n sanitized_flags = []\n saw_arch = False\n for i, flag in enumerate( flags ):\n if flag == '-arch':\n saw_arch = True\n continue\n elif flag.startswith( '-arch' ):\n continue\n elif saw_arch:\n saw_arch = False\n continue\n\n sanitized_flags.append( flag )\n\n vector = ycm_core.StringVec()\n for flag in sanitized_flags:\n vector.append( flag )\n return vector",
"def create_basic_flag_mask(*flags):\n\n out = np.ones(len(flags[0]), bool)\n for flag in flags:\n out &= (~flag)\n\n return out",
"def get_flags(self):\n\n if self.raw.flags not in [0, 1, 2, 3]:\n raise ValueError(\"Invalid raw flags: {}\".format(self.raw.flags))\n\n flags = set()\n\n if (self.raw.flags & 0b010) > 0:\n flags.add(\"DF\")\n\n if (self.raw.flags & 0b001) > 0:\n flags.add(\"MF\")\n\n return frozenset(flags)",
"def TransformFlags(self) -> _n_2_t_0[bool]:",
"def get_parsed_flags():\n return Flags.parsed_args",
"def _get_flags(args: Sequence[str]) -> Dict[str, bool]:\n flags = {}\n for arg in args:\n if arg.startswith(FLAG_MARKER):\n flag_name = arg[len(FLAG_MARKER):]\n if flag_name and flag_name not in OMIT_FLAGS:\n flags[flag_name] = True\n else:\n break # Ignore flags after initial CLI call\n return flags",
"def get_all_flags(options):\n flags = []\n if options.inputFlag:\n flags.append(try_to_int(options.inputFlag))\n if options.outputFlags:\n for flag in options.outputFlags:\n flags.append(try_to_int(flag))\n return flags",
"def convert_flags_to_boolean_dict(flags):\n return {f: True for f in flags}",
"def re_flags_to_string(flags: int=0) -> str:\n possible_flags = {\n re.ASCII: \"a\",\n re.IGNORECASE: \"i\",\n re.LOCALE: \"L\",\n re.UNICODE: \"u\",\n re.MULTILINE: \"m\",\n re.DOTALL: \"s\",\n re.VERBOSE: \"x\",\n }\n\n flagchrs = \"\"\n for flagval, flagchr in possible_flags.items():\n if flags & flagval:\n flagchrs += flagchr\n\n return f\"(?{flagchrs})\" if flagchrs else \"\"",
"def parseFlags(self):\n # Blank return value.\n retVal = \"\"\n \n try:\n # Store flags as we parse them.\n allFlags = []\n \n # Get the accumulator flag.\n accFlag = self.__flags & self.f_accum\n trendFlag = self.__flags & self.f_trend\n modeFlag = self.__flags & self.f_mode\n \n # Complete set of readings?\n if accFlag == self.f_accum_complete:\n # Completed loading values into the accumulator.\n allFlags.append('C')\n elif accFlag == self.f_accum_accum:\n # Still accumulating.\n allFlags.append('A')\n elif accFlag == self.f_accum_unk:\n # Unknown.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Trend?\n if (trendFlag) == self.f_trend_stable:\n # Readings stable.\n allFlags.append('S')\n elif (trendFlag) == self.f_trend_up:\n # Still accumulating.\n allFlags.append('U')\n elif (trendFlag) == self.f_trend_dn:\n # Still accumulating.\n allFlags.append('D')\n elif (trendFlag) == self.f_trend_unk:\n # Still accumulating.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Mode?\n if modeFlag == self.f_mode_fast:\n # Fast\n allFlags.append('F')\n elif modeFlag == self.f_mode_slow:\n # Slow\n allFlags.append('S')\n elif modeFlag == self.f_mode_counter:\n # Stream\n allFlags.append('C')\n elif modeFlag == self.f_mode_scaler:\n # Roll\n allFlags.append('L')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Build a nice string.\n retVal = ''.join(allFlags)\n \n \n except:\n raise\n \n # Return value.\n return retVal",
"def flags(self):\n flags = self.Flags\n return [x for x in self.FLAGS_VALUES if flags & x]",
"def test_removeFlagsSilently(self):\n self._flagsSilentlyTest('removeFlags', b'-FLAGS.SILENT')",
"def remove_invalid_flags(flags):\n\n # Validate flag format\n filtered_flags = [flag for flag in flags if is_valid_flag(flag)]\n\n number_of_removed_flags = len(flags) - len(filtered_flags)\n\n if number_of_removed_flags:\n log.failure(\"Removed {} flags with incorrect format.\".format(number_of_removed_flags))\n\n return filtered_flags",
"def flags_decomposer(flags):\n l = 0\n \n if flags & 2 ** 1:\n l = 1\n \n if flags & 2 ** 4:\n l = 2\n \n return l",
"def encode_flags(names):\n return reduce(operator.ior, (flag_masks[name] for name in names), 0)",
"def single_flag_name(self):\n return self.enum_class.to_representation(self.flags.name)",
"def decode_flags(flags):\n if isinstance(flags, six.string_types):\n flags = int(flags.lstrip('@'), 16)\n return [name for i, name in enumerate(flag_names) if (1 << i) & flags]",
"def or_cpp_flags(self, flags):\n flags_dict = {\"deprecated\": \"vtable::common_::deprecated\",\n \"hidden\": \"vtable::common_::hidden\",\n \"unprivileged\": \"vtable::common_::unprivileged\",\n \"no_reply\": \"vtable::method_::no_reply\"}\n\n cpp_flags = []\n for flag in flags:\n try:\n cpp_flags.append(flags_dict[flag])\n except KeyError:\n raise ValueError(\"Invalid flag \\\"{}\\\"\".format(flag))\n\n return \" | \".join(cpp_flags)",
"def get_flags(self):\n return self.short_flag, self.long_flag",
"def test_removeFlags(self):\n self._flagsTest('removeFlags', b'-FLAGS')",
"def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags",
"def read_flags():\n return flag_args",
"def broaden(mask):\n if len(mask) < 2:\n return mask\n # Note: the order in which these operations are performed is important.\n # Modifying newmask in-place with the |= operator only works for if\n # newmask[:-1] is the L-value.\n newmask = concatenate(([False], mask[1:] | mask[:-1]))\n newmask[:-1] |= mask[1:]\n return newmask",
"def format_flags(self):\n flags = []\n if self.is_unique:\n flags.append('Unique')\n if self.is_weak:\n flags.append('Weak')\n if self.is_ctor:\n flags.append('Constructor')\n if self.is_warning:\n flags.append('Warning')\n if self.is_ref:\n flags.append('Indirect reference')\n if self.is_reloc:\n flags.append('Reloc function')\n if self.is_debug:\n flags.append('Debug')\n if self.is_dynamic:\n flags.append('Dynamic')\n if self.is_func:\n flags.append('Function')\n if self.is_file:\n flags.append('File')\n if self.is_object:\n flags.append('Object')\n return flags",
"def get_flags(cls):\n return cls.get_short_flag(), cls.get_flag()",
"def flags_decomposer(flags):\n l = []\n if flags & 2 ** 0:\n l.append(\"superscript\")\n if flags & 2 ** 1:\n l.append(\"italic\")\n if flags & 2 ** 2:\n l.append(\"serifed\")\n else:\n l.append(\"sans\")\n if flags & 2 ** 3:\n l.append(\"monospaced\")\n else:\n l.append(\"proportional\")\n if flags & 2 ** 4:\n l.append(\"bold\")\n return \", \".join(l)",
"def get_flags(flags):\n byte1 = bytes(flags[:1])\n QR = '1'\n OPCODE = ''\n for bit in range(1, 5):\n OPCODE += str(ord(byte1) & (1 << bit)) # bitwise\n AA = '1'\n TC = '0'\n RD = '0'\n # 2 bytes\n RA = '0'\n Z = '000'\n RCODE = '0000'\n first_byte = convert_to_bytes(int(QR + OPCODE + AA + TC + RD, 2), 1, byte_order='big')\n second_byte = convert_to_bytes(int(RA + Z + RCODE, 2), 1, byte_order='big')\n return bytearray(first_byte + second_byte)",
"def _get_build_flags(cmdline: str) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:\n cmdlist = cmdline.split()\n labels = [arg for arg in cmdlist if arg.startswith(\"//\")]\n build_flags = [arg for arg in cmdlist if not arg.startswith(\"//\")]\n return (tuple(labels), tuple(build_flags))",
"def encode_aurafilter(flags: List[str]) -> int:\n aurafilter = 0\n for name in flags:\n try:\n aurafilter |= AURAFILTER_FLAGS[name]\n except KeyError:\n raise ValueError(f\"Unknown AuraFilter flag name: {name!r}\") from None\n return aurafilter",
"def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }"
]
| [
"0.59382993",
"0.59221536",
"0.5758345",
"0.5547654",
"0.55003345",
"0.5474018",
"0.5454624",
"0.53176725",
"0.5287026",
"0.52538836",
"0.5246436",
"0.52206445",
"0.52093154",
"0.5192281",
"0.51625186",
"0.5133777",
"0.5103207",
"0.5092846",
"0.50773543",
"0.5054406",
"0.50417715",
"0.50391567",
"0.50279796",
"0.49648115",
"0.49302602",
"0.49279776",
"0.49171457",
"0.48732632",
"0.48455805",
"0.48373452"
]
| 0.679333 | 0 |
Get request data from NASA APOD API | def get_requests():
global response
    #Set the parameters for the request
url = "https://api.nasa.gov/planetary/apod"
api_key = "DEMO_KEY" #Use your own key
date = calender.get_date()
querystring = {'api_key':api_key, 'date':date}
#Call the request and turn it into a python usable format
response = requests.request("GET", url, params=querystring)
response = response.json()
#Update output label
set_info() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_apod_data(api_key: str, download: bool = False, path: str = \".\") -> dict:\n url = \"https://api.nasa.gov/planetary/apod\"\n return requests.get(url, params={\"api_key\": api_key}).json()",
"def __apiRequest(self, url, parms={}):\n authparms = self.__addAuthParms(parms);\n request = self.http.request('GET', url, fields=authparms)\n if request.status != 200:\n raise ApiCommunicationError('Failed to retrieve data from Marvel, HTTP Status {}'.format(request.status))\n else:\n return json.loads( request.data.decode('utf-8') )",
"def _query_aprs_api(self):\n \n # Query APRS.fi for the balloon's location\n try:\n aprs_request = urllib2.Request(self._aprs_api_endpoint)\n aprs_opener = urllib2.build_opener()\n aprs_response = aprs_opener.open(aprs_request, None, self.aprs_update_timeout)\n except Exception as e:\n # Error downloading the file\n raise APRSAPIError('There was an error querying the APRS.fi API.')\n \n # Parse the APRS response\n try:\n parsed_response = json.load(aprs_response)\n except ValueError as e:\n # Error parsing the response\n raise APRSAPIError('There was an error parsing the JSON response from the APRS.fi API.')\n\n # Check for an API error\n if parsed_response['result'] == \"fail\":\n raise APRSAPIError('An error occured querying the APRS.fi API: \"'+parsed_response['description']+'\"')\n\n # Format the response into the expected format\n final_response = {\n 'timestamp': int(parsed_response['entries'][0]['time']),\n 'longitude': float(parsed_response['entries'][0]['lng']),\n 'latitude': float(parsed_response['entries'][0]['lat']),\n 'altitude': float(parsed_response['entries'][0]['altitude'])\n }\n\n return final_response",
"def api(title):\n\ttitle = title.replace(\" \", \"+\")\n\tresponse = urlopen(\"http://www.omdbapi.com/?apikey=cc47980e&t={}\".format(title)).read().decode('utf8')\n\tdata = json.loads(response)\n\n\treturn data",
"def api():\n global KEY_FILE\n global APCA_API_KEY_ID\n global APCA_API_SECRET_KEY\n\n if \"APCA_ID\" in os.environ:\n APCA_ID = os.environ[\"APCA_ID\"]\n APCA_KEY = os.environ[\"APCA_KEY\"]\n elif KEY_FILE:\n auth_header = authentication_header()\n APCA_ID = str(auth_header[\"APCA-API-KEY-ID\"])\n APCA_KEY = str(auth_header[\"APCA-API-SECRET-KEY\"])\n else:\n APCA_ID = APCA_API_KEY_ID\n APCA_KEY = APCA_API_SECRET_KEY\n\n # Open the API connection\n api = tradeapi.REST(APCA_ID, APCA_KEY, \"https://paper-api.alpaca.markets\")\n # Get account info\n api.get_account()\n return api",
"def get_archive_data(query: str) -> dict:\n url = \"https://images-api.nasa.gov/search\"\n return requests.get(url, params={\"q\": query}).json()",
"async def request_api(url):\n\theaders = {\"User-Agent\": f\"Mozilla/5.0 aiotfm/{__version__}\"}\n\n\ttry:\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url, headers=headers) as resp:\n\t\t\t\treturn await resp.json()\n\texcept aiohttp.ClientError:\n\t\treturn {}",
"def apiai_response(query, session_id):\n\trequest = ai.text_request()\n\trequest.lang='en'\n\trequest.session_id=session_id\n\trequest.query = query\n\tresponse = request.getresponse()\n\treturn json.loads(response.read().decode('utf8'))",
"def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)",
"def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data",
"async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise",
"def getOmdbInfo(title):\n baseUrl = \"http://www.omdbapi.com/?\"\n # parsing the API credentials to the base url\n credentialsData = urllib.parse.urlencode(credentials)\n finalUrl = baseUrl + credentialsData\n parameters = {\"t\": title} # Parameters to add a query to the url\n try:\n r = requests.get(url=finalUrl, params=parameters)\n return r.json()\n except Exception as e:\n return None",
"def get_meraki_api_data(api_uri):\n url = API_URL + api_uri\n a_response = requests.get(url, headers=api_headers, verify=False)\n if a_response.status_code == 200:\n data = json.loads(a_response.text)\n logger.info(\"Meraki GET operation suceeded : %s \", api_uri)\n else:\n data = {}\n logger.info(\"Meraki GET Operation failed : %s\", a_response.status_code)\n return data",
"def probe_api():\n\n info = loads(get(url).text)\n return info",
"def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data",
"def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]",
"def request(self):\n cookies = {\n \"session\": self.session_cookie\n }\n url = self.get_request_url()\n\n r = requests.get(url, cookies=cookies)\n return r.json()",
"def __init__(self, address, ap):\n super(ReadRequest, self).__init__(address=address, ap=ap)",
"def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)",
"def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response",
"def make_forecastio_request():\n REQUEST = REQ_BASE + \"{0}/\".format(APIKEY)+\\\n \"{0},{1}\".format(LAT,LON)\n try:\n conn = httplib.HTTPSConnection(FORECASTIO_URL)\n conn.request(\"GET\", REQUEST)\n resp = conn.getresponse()\n data = resp.read()\n except:\n giveup()\n else:\n return data",
"def getInfoContainer(data):\n\tAPI_URL = 'https://gps.cs.etc.vn:15443/etcaccr-ecargo-api/swagger-resources/request-object'\n\turl_data = urlencode(data)\n\turl = API_URL + \"?\" + url_data\n\n\tcurl = pycurl.Curl()\n\tcurl.setopt(curl.SSL_VERIFYPEER, 0)\n\tcurl.setopt(pycurl.URL, url)\n\tcurl.setopt(pycurl.HTTPHEADER, ['Accept: application/json',\n\t 'Content-Type: application/json'])\n\n\tbuffer = BytesIO()\n\n\t# prepare and send. See also: pycurl.READFUNCTION to pass function instead\n\tcurl.setopt(pycurl.WRITEFUNCTION, buffer.write)\n\tcurl.perform()\n\n\tstatus_code = curl.getinfo(pycurl.RESPONSE_CODE)\n\n\treturn status_code, buffer.getvalue().decode('utf8')",
"def get(self, request, format=None):\n \n return Response(\"ahla Rami\")",
"def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None",
"def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]",
"def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json",
"def _request(self, endpoint, params=dict(), data=None):\n client_value = \"Python Netinfo\"\n headers = {'X-Request-Client': client_value}\n url = '/'.join([self.url, endpoint])\n kwargs = {'url': url, 'headers': headers, 'timeout': 30,\n 'params': params, 'data': data}\n response = requests.get(**kwargs)\n if response.status_code not in range(200, 299):\n raise RequestFailure(response.status_code, response.content)\n try:\n loaded = json.loads(response.content)\n except Exception as error:\n raise InvalidResponse(error)\n return loaded",
"def getAcdcs(url, requests):\n acdcs = []\n for request in requests:\n name=request['id']\n #if a wrong or weird name\n if len(request['key'])<3:\n print request\n continue\n if 'ACDC' not in name:\n continue\n status=request['key']\n #only completed requests\n if status != 'completed':\n continue\n #requestType=request['key'][2]\n #only acdcs\n #if requestType != 'Resubmission':\n # continue\n acdcs.append(name) \n return acdcs",
"def get_patient_status():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/3\")\n print(r.text)",
"def enaApiQuery(acc):\n viewUrl = \"http://www.ebi.ac.uk/ena/data/view/{acc}&display=xml\"\n\n def mkRequest():\n cnt = 0\n while True:\n try:\n return urlopen(viewUrl.format(acc=acc))\n except URLError as ex:\n if cnt >= 10:\n raise MetadataException(\"ENA API request failed\", acc=acc) from ex\n cnt += 1\n time.sleep(cnt * 0.5)\n\n result = ET.fromstring(mkRequest().read())\n # If accession was not found, the results are a document with just a <ROOT>\n # with the text:\n # Entry: ARFARF display type is either not supported or entry is not found.\n if len(result) == 0:\n if result.text.find('entry is not found') < 0:\n raise MetadataException(\"expected string 'entry is not found' when no data returned got '{}'\".format(result.text),\n acc=acc)\n return MetadataException(\"ENA entry not found\", acc=acc)\n return result"
]
| [
"0.675507",
"0.6119157",
"0.60302633",
"0.5864022",
"0.5860251",
"0.5842368",
"0.5795848",
"0.57947344",
"0.57612",
"0.5754473",
"0.5714969",
"0.556975",
"0.5491468",
"0.54912275",
"0.5476093",
"0.5472536",
"0.54627216",
"0.5447599",
"0.5426728",
"0.5425115",
"0.54118353",
"0.54112846",
"0.54100347",
"0.53944993",
"0.53817695",
"0.5365313",
"0.53582305",
"0.53335863",
"0.53258175",
"0.5323776"
]
| 0.7090859 | 0 |
Save the desired photo | def save_photo():
save_name = filedialog.asksaveasfilename(initialdir="10.APOD Viewer/", title="Save Image", filetype=(("JPEG", "*.jpg"), ("All Files", "*.*")))
img.save(save_name + ".jpg") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def savePhoto(self, path):\n if self.original_image and path:\n cv2.imwrite(path, self.original_image)",
"def save_image(self):\n self.save()",
"def savePicture(self):\n self.file_name = QtGui.QFileDialog.getSaveFileName(self, \n \"Save as... (specify extension)\", \"\")\n cv2.imwrite(self.file_name, self.frame)",
"def save_image_action(self):\n self.view.save_image(self.settings.get_image_type())",
"def save_as(self, filename):\n opencv.imwrite(filename, self.img)",
"def save(self, filename):\n self.image.save(filename, self.options.img_format)",
"def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)",
"def save(self, **kwargs):\n self.remove_file()\n if not self.image:\n self.generate(save=False)\n else:\n self.image.name = self.file()\n super(FormatedPhoto, self).save(**kwargs)",
"def camera_save_image(filename):\n image = camera_acquire_image()\n image.save(filename)",
"def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")",
"def saveImageAs(self, name):\n\t\tself.image.save(name)",
"def img_save(name,img):\n cv2.imwrite(name,img)",
"def save_image(filename):\n subprocess(\"camera_save_image(%r)\" % filename)\n ##image = acquire_image()\n ##image.save(filename)",
"def save_processed_image(self, filename=None):\n if filename is None:\n tag = self.get_alert_objects_list(\"_\")\n filename = os.path.join(\n config.IMAGE_SAVE_PATH, \"%s_%s_%s_%s.jpg\" %\n (time.strftime(\"%Y-%m-%d_%H.%M.%S\", time.localtime(self.timestamp)),\n self.camera_name, self.detection_model, tag))\n # print(\"Using filename %s\" % filename)\n ret = cv2.imwrite(filename, self.processed_image)\n self.processed_file = filename\n return ret",
"def saveimage(self):\n if self.saveimageButton.isChecked():\n self.save = True\n self.channelsOpen()\n self.movetoStart()\n self.saveimageButton.setText('Abort')\n self.guarda = np.zeros((self.numberofPixels, self.numberofPixels))\n self.liveviewStart()\n\n else:\n self.save = False\n print(\"Abort\")\n self.saveimageButton.setText('reintentar Scan and Stop')\n self.liveviewStop()",
"def __save_display_image(self):\n\n try:\n self.photomosaic_generator.can_save_image()\n if self.generating:\n raise MissingComponentError('Cannot save. Image is currently being generated.')\n except MissingComponentError as error_msg:\n error_msg_box = QtWidgets.QMessageBox.critical(self, 'Error', str(error_msg))\n else:\n options = QtWidgets.QFileDialog.Options()\n options |= QtWidgets.QFileDialog.DontUseNativeDialog\n file_name, file_type = QtWidgets.QFileDialog.getSaveFileName(self, 'Save photomosaic', '..',\n 'jpg (*.jpg);;png (*.png)', options=options)\n file_type = file_type[-5:-1]\n if file_name != '':\n self.photomosaic_generator.save_image(file_name + file_type if file_name[-4:] != file_type else\n file_name)",
"def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)",
"def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)",
"def saveImage(self, event):\r\n fileWritten = self.image.writeFile()\r\n self.statusBar.SetStatusText(\"Saved {}\".format(fileWritten))",
"def save_image(img, path):\n cv2.imwrite(path, img)",
"def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)",
"def save(self):\n im = Image.open(self.picture)\n output = BytesIO()\n im.thumbnail((350, 350))\n im.save(output, format='JPEG', quality=100)\n output.seek(0)\n self.picture = InMemoryUploadedFile(output, 'ImageField', \"%s.jpg\" % self.picture.name.split('.')[0],\n 'image/jpeg', sys.getsizeof(output), None)\n super(Tire, self).save()",
"def save(im, output_dir: Path):\n if not hasattr(save, \"counter\"):\n save.counter = 0 # type: ignore\n fname = f\"{save.counter:05d}.jpg\" # type: ignore\n cv2.imwrite(str(output_dir / fname), im)\n print(\"Saved\", fname)\n save.counter += 1 # type: ignore",
"def _save(self, data: PIL.Image) -> None:\n with self._fs.open(self._filepath, mode=\"wb\") as f:\n data.save(f)",
"def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")",
"def save_image(self, filename):\n if filename[-4:] != '.pkl':\n filename + '.pkl'\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)",
"def save(self):\n\n # TODO:Find place to save data, write logic to save images(Filter out video?)",
"def save_image(image, file_name):\n io.imsave(file_name,image)",
"def __saveImage(self, act):\n index = act.data()\n itm = self.imagesTree.topLevelItem(index)\n if itm is None:\n return\n \n if (\n not self.imagePreview.scene() or\n len(self.imagePreview.scene().items()) == 0\n ):\n return\n \n pixmapItem = self.imagePreview.scene().items()[0]\n if not isinstance(pixmapItem, QGraphicsPixmapItem):\n return\n \n if pixmapItem.pixmap().isNull():\n E5MessageBox.warning(\n self,\n self.tr(\"Save Image\"),\n self.tr(\n \"\"\"<p>This preview is not available.</p>\"\"\"))\n return\n \n imageFileName = WebBrowserTools.getFileNameFromUrl(QUrl(itm.text(1)))\n index = imageFileName.rfind(\".\")\n if index != -1:\n imageFileName = imageFileName[:index] + \".png\"\n \n filename = E5FileDialog.getSaveFileName(\n self,\n self.tr(\"Save Image\"),\n imageFileName,\n self.tr(\"All Files (*)\"),\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n \n if not filename:\n return\n \n if not pixmapItem.pixmap().save(filename, \"PNG\"):\n E5MessageBox.critical(\n self,\n self.tr(\"Save Image\"),\n self.tr(\n \"\"\"<p>Cannot write to file <b>{0}</b>.</p>\"\"\")\n .format(filename))\n return",
"def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)"
]
| [
"0.7788132",
"0.774997",
"0.7455914",
"0.74308985",
"0.7244878",
"0.72401404",
"0.71777683",
"0.7136806",
"0.7078779",
"0.70240724",
"0.7012547",
"0.7004505",
"0.692248",
"0.6906744",
"0.68199617",
"0.680405",
"0.68036956",
"0.6798691",
"0.6788766",
"0.67621744",
"0.6716808",
"0.671594",
"0.66978663",
"0.6688744",
"0.66845065",
"0.6650548",
"0.6647502",
"0.66462225",
"0.6634302",
"0.66311115"
]
| 0.7974888 | 0 |
A method that invokes "It's Always Sunny In Philadelphia" title card generation from ImageUtils. | async def iasip_title_card(self, ctx: Context, *, title: str) -> None:
async with ctx.channel.typing():
if not title.startswith('"'):
title = '"' + title
if not title.endswith('"'):
title += '"'
buffer = await image_utils.title_card_generator(title)
try:
await ctx.send(file=discord.File(buffer, filename="iasip.png"))
except discord.HTTPException as e:
bot_logger.error(f'File Send Failure. {e.status}. {e.text}')
await ctx.send(f'Could not send image. Details: [Status {e.status} | {e.text}]')
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_title(words):",
"def print_title():\r\n HANGMAN_ASCII_ART = \"\"\"welcome to the game hangman\r\n _ _ \r\n | | | | \r\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/\r\n\"\"\"\r\n print(HANGMAN_ASCII_ART)",
"def _generate_title(cls, ca_type):\n special_chars = string_utils.SPECIAL\n return append_random_string(\n \"{}_{}_\".format(ca_type, random_string(\n size=len(special_chars), chars=special_chars)))",
"def prep_title(self):\n self.title_image = self.font.render(self.title, True, self.text_color,\n self.ctl_settings.panel_bg_color)\n self.title_image_rect = self.title_image.get_rect()\n self.title_image_rect.centerx = self.rect.centerx\n self.title_image_rect.bottom = self.rect.top - 1",
"def get_title():",
"def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")",
"def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"",
"def __draw_title(self):\n title = 'SNAAAAKE'\n x_offset = (curses.COLS - len(title)) // 2\n y_offset = max(1, (curses.LINES - self.config.arena_size[1] - 2) // 4)\n self.stdscr.addstr(y_offset, x_offset, title)",
"def create_image_caption_pairs(self):",
"def generate_horror_title():\n d666 = random.randint(1, 666)\n if d666 <= 111:\n #the adj noun\n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 111 and d666 <= 222: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 222 and d666 < 444: \n #the adj noun of verb \n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_verb[random.randint(0, len(horror_verb) - 1)]\n elif d666 >= 444 and d666 < 555: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 >= 555:\n #verb of the adj noun\n return horror_verb[random.randint(0, len(horror_verb) - 1)] + \" of the \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]",
"def generate_romance_title():\n d69 = random.randint(1, 69)\n if d69 <= 35:\n #the adj noun\n return \"The \" + romance_adj[random.randint(0, len(romance_adj) - 1)] + \" \" + romance_noun[random.randint(0, len(romance_noun) - 1)]\n elif d69 > 35:\n #noun of the adj noun\n return romance_noun[random.randint(0, len(romance_noun) - 1)] + \" of the \" + romance_adj[random.randint(0, len(romance_adj) - 1)] + \" \" + romance_noun[random.randint(0, len(romance_noun) - 1)]",
"def generate_fantasy_title():\n d20 = random.randint(1, 20)\n if d20 <= 4:\n #genetive noun\n return fantasy_genetive[random.randint(0, len(fantasy_genetive) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 > 4 and d20 < 13: \n #The adj noun\n return \"The \" + fantasy_adj[random.randint(0, len(fantasy_adj) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 >= 13:\n #something of something\n return fantasy_noun[random.randint(0, len(fantasy_noun) - 1)] + \" of \" + fantasy_what_is_this[random.randint(0, len(fantasy_what_is_this) - 1)]",
"def title_n(self):\n self.run_command('title_n')",
"def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title",
"def page_title(id):\r\n\tswitcher = {\r\n\t\t\"404\": \"Error 404: Not Found - WWW2PNG\",\r\n\t\t\"api_activate\": \"API Key Activated - WWW2PNG\",\r\n\t\t\"api_help\": \"API Help - WWW2PNG\",\r\n\t\t\"api_request\": \"API Key Requested - WWW2PNG\",\r\n\t\t\"buried\": \"Manage Buried - WWW2PNG\",\r\n\t\t\"contact\": \"Contact Us - WWW2PNG\",\r\n\t\t\"default\": \"Free Webpage Screenshot Service API with Blockchain Anchoring - WWW2PNG\",\r\n\t\t\"error\": \"Error - WWW2PNG\",\r\n\t\t\"pp\": \"Privacy Policy - WWW2PNG\",\r\n\t\t\"tos\": \"Terms of Service - WWW2PNG\",\r\n\t}\r\n\treturn switcher.get(id, \"WWW2PNG\")",
"def TitlePrint(title):\n titleLength = len(title)\n barLength = titleLength + 12\n fmtdTitle = '----- {0} -----'.format(title)\n bar = '-' * barLength\n print(bar, fmtdTitle, bar,\n sep='\\n', end='\\n\\n')",
"def print_banner(title):\n\n title = \" \" + title + \" \"\n\n nequals = ncolumns - len(title)\n nleft = nequals // 2\n\n print((\"=\" * (nleft + nequals %2)) + title + (\"=\" * nleft))",
"def prep_titles(self, cost_title: str=\"\") -> (str, str):\n img_title = self.function_name + \\\n '_batch' + str(self.batch_size)\n\n if cost_title == \"\":\n img_title = str(self.experiment_count) + '_accuracy_plot_' + img_title\n title = self.title + \\\n '\\n' + self.function_name + \", \" + \\\n 'mini-batch size: ' + str(self.batch_size) + \\\n '\\nAvg Last 10 Epochs: Training ' + self.tr_mean_str + '%, Testing ' + self.test_mean_str + '%'\n else:\n img_title = str(self.experiment_count) + '_cost_plot_' + img_title\n title = cost_title\n\n print(f'\\nexperiment: {img_title}')\n return title, img_title",
"def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))",
"def draw_title(self):\n title = text_helper.create_text(\"Indefinite Loop\", menu_fonts, 50, white)\n self.main_menu_surface.blit(title, (center_horizontally(title, self.screen_dimensions), 50))",
"def title_p(self):\n self.run_command('title_p')",
"def print_game_logo():\n\n HANGMAN_ASCII_ART = r\"\"\"\n _ _\n | | | |\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\n __/ |\n |___/\n\"\"\"\n \n clear_player_screen()\n print_centered(HANGMAN_ASCII_ART)\n\n return None",
"def _generate_title_description(psap_id, title, description):\n if description is None:\n description = PersistentFields.get_description(psap_id)\n else:\n PersistentFields.set_description(psap_id, description)\n if title is None:\n title = PersistentFields.get_title(psap_id)\n else:\n PersistentFields.set_title(psap_id, title)\n\n return title, description",
"def generate_title(model, tokenizer, photo, max_length):\n in_text = \"startseq\"\n vocab = len(tokenizer.word_index) + 1\n prev_word = \"\"\n\n for i in range(max_length):\n sequence = tokenizer.texts_to_sequences([in_text])[0]\n sequence = pad_sequences([sequence], maxlen=max_length)\n yhat = model.predict([photo, sequence], verbose=0)\n yhat = random.choice(list(range(vocab)), 1, p=yhat[0])\n # yhat = argmax(yhat)\n word = word_for_id(yhat, tokenizer)\n\n if word is None:\n break\n\n if word == prev_word:\n pass\n\n in_text += \" \" + word\n\n prev_word = word\n\n if word == \"endseq\":\n break\n\n return in_text",
"def generate_caption(self, images):\n text = \"a photography of\"\n\n prev_device = self.caption_generator.device\n\n device = self._execution_device\n inputs = self.caption_processor(images, text, return_tensors=\"pt\").to(\n device=device, dtype=self.caption_generator.dtype\n )\n self.caption_generator.to(device)\n outputs = self.caption_generator.generate(**inputs, max_new_tokens=128)\n\n # offload caption generator\n self.caption_generator.to(prev_device)\n\n caption = self.caption_processor.batch_decode(outputs, skip_special_tokens=True)[0]\n return caption",
"def print_title():\n print(\" ##################\")\n print(\" # #\")\n print(\" # ===== ===== #\")\n print(\" # | | #\")\n print(\" # | | #\")\n print(\" # #\")\n print(\" ##################\")\n print(\"\\n\")\n print(\"#\" * 10, end='')\n print(\"Welcome to timetable tool\", end='')\n print(\"#\" * 10)",
"def test_getTitle(self):\n def checkNameAndTitle(name, titlesolution):\n title = self._nameClassifierBuilder._getTitle(name)\n self.assertEquals(titlesolution, title)\n\n checkNameAndTitle(\"Mrs. ldajfhgp\", \"Mrs\")\n checkNameAndTitle(\"dlsfajkMrdlkjaf\", \"Mr\")\n checkNameAndTitle(\"dagddgwdasJonkheer\", \"Jonkheer\")",
"def createTitleCard2(self, name, text, wordwrap, x, z, scale=0.025):\n self.myTitle2 = TextNode(name)\n self.myTitle2.setFont(self.font)\n self.myTitle2.setText(text)\n self.myTitle2.setWordwrap(wordwrap)\n self.myTitle2.setTextColor(globals.colors['guiwhite'])\n self.myTitle2.setCardColor(globals.colors['guiblue3'])\n self.myTitle2.setFrameColor(globals.colors['guiblue2'])\n self.myTitle2.setFrameAsMargin(.3, .5, .5, .5)\n self.myTitle2.setFrameLineWidth(3)\n self.myTitle2.setCardAsMargin(.3, .5, .5, .5)\n textNodePath = aspect2d.attachNewNode(self.myTitle2)\n textNodePath.setScale(scale)\n textNodePath.setPos(x, 0, z)\n self.myWidgets.append(textNodePath)",
"def __draw_title(self):\n if self.title is not None:\n self.fig.suptitle(\n self.title, y=self.settings.otherParams[\"figure.title.yposition\"])",
"def test_title(names):"
]
| [
"0.6305679",
"0.62836653",
"0.61354226",
"0.6115783",
"0.6080377",
"0.60758245",
"0.606231",
"0.6042628",
"0.60418034",
"0.5866398",
"0.58305675",
"0.5820785",
"0.57217073",
"0.5705232",
"0.56908786",
"0.5675178",
"0.5664888",
"0.56627333",
"0.5640492",
"0.56279325",
"0.5561185",
"0.5553539",
"0.55437684",
"0.552277",
"0.54951125",
"0.54912126",
"0.5489052",
"0.5487059",
"0.5470685",
"0.5439657"
]
| 0.6612236 | 0 |
helper method to get integer input from user in certain range | def _int_input_in_range(self, print_out, range_):
try:
i = int(input(print_out))
assert range_[0] <= i <= range_[1]
return i
except AssertionError:
print('Please, enter a vaild number')
return None
except ValueError:
print('Please, enter a number not a string')
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ask_number (question,low,high):\n response = None\n while response not in range(low,high):\n response = int(input(question))\n return response",
"def ask_number(question, low, high):\n response = None\n while response not in range (low, high):\n response = int(input(question))\n return response",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response",
"def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)",
"def _ask_user_range(question, first, last, default):\n\n while True:\n answer = input(question)\n if answer == \"\":\n answer = default\n break\n if re.findall(r\"[0-9+]\", answer):\n if int(answer) in range(first, last + 1):\n break\n else:\n print(\n \"Please a value between {} and {} or Return.\".format(\n first, last\n )\n )\n else:\n print(\n \"Please a number between {} and {} or Return.\".format(first, last)\n )\n\n return int(answer)",
"def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))",
"def get_number():\n valid_input = False\n while not valid_input:\n try:\n user_num = int(input(\"Enter a number between {} and {}: \".format(LOWER_BOUND, UPPER_BOUND)))\n if LOWER_BOUND <= user_num <= UPPER_BOUND:\n return user_num\n except ValueError:\n pass\n print(\"That is not a valid number !\")",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high, 1):\n response = input(question)\n return response",
"def get_employee_input_int(message):\n while True:\n user_input = input('{}: '.format(message))\n\n # Type validation\n try:\n number = int(user_input)\n break\n except ValueError:\n print('You must enter a whole number.')\n continue\n\n #Range Validation\n # if valid_range and number not in valid_range:\n # _min = min(valid_range)\n # _max = max(valid_range)\n # print('You must enter a number from {} to {}.'.format(_min, _max))\n # continue\n return number",
"def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)",
"def get_int(lo, hi):\n while True:\n n = input(f\"Please enter an integer from {lo} to {hi}: \")\n try:\n n = int(n) \n except ValueError: \n print(\"It must be an integer!\") \n continue\n if n < lo: \n print(\"You can't use negative numbers...\")\n continue # needed, otherwise enters the else statement.\n if n > hi: \n print(\"Think smaller\")\n else:\n break # exit to return if meets conditions.\n return n",
"def GetInteger(prompt=\"Please enter a number:\",\n lowerbound=0, upperbound=99,\n smaller_prompt=\"It's Smaller, please re-enter:\",\n bigger_prompt=\"It's Bigger, please re-enter:\",\n not_int_prompt=\"You did not enter a number, please re-enter:\"):\n user_input = input(prompt)\n\n def InternalFunc1(num):\n while True:\n try:\n return int(num)\n except ValueError:\n num = input(not_int_prompt)\n result = InternalFunc1(user_input)\n\n while not lowerbound <= result <= upperbound:\n if result < lowerbound:\n user_input = input(smaller_prompt)\n result = InternalFunc1(user_input)\n if upperbound < result:\n user_input = input(bigger_prompt)\n result = InternalFunc1(user_input)\n return result",
"def PickNumber(lenList, message = ' To select the correct option pick a number in range ',min = 1, typeInput = int):\n while True:\n try:\n input1 = typeInput(input('\\n'+message+str(min)+'-'+str(lenList)+': \\t'))\n except ValueError:\n print( 'That\\'s not a number!')\n else:\n if min <= input1 <= lenList:\n return input1\n else:\n print( 'Number out of range. Try again!')",
"def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")",
"def boundary(quantity, lower, upper):\r\n in_range = False\r\n while not in_range:\r\n if quantity < lower or quantity > upper:\r\n quantity = int(input(\"That is out of range, please try a number between \" + \\\r\n str(lower) + \" and \" + str(upper) + \": \"))\r\n else:\r\n in_range = True\r\n return quantity",
"def user_selection(num, text):\n lst = list(range(1,num+1))\n answer= 0\n while answer not in lst:\n try:\n answer = int(input(text))\n \n if answer not in range(1,num+1):\n raise ValueError\n break\n except ValueError:\n print('Select a valid Number')\n\n return answer",
"def Demo():\n print(\"Users input:\", GetInteger())\n print(\"Users input:\", GetInteger(lowerbound=-3, upperbound=10))\n input(\"Please press <Enter> to exit the demo.\")",
"def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check",
"def get_int(message, high, low=0):\r\n intValue = 1\r\n while True:\r\n try:\r\n intValue = int(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if intValue <= low or intValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". Please try again.\")\r\n continue\r\n break\r\n return intValue",
"def _readInt(self, prompt, small, large):\n prompt = prompt + ' (from ' + str(small) + ' to ' + str(large) + ')? '\n answer = small - 1 # intentionally invalid\n while not small <= answer <= large:\n try:\n answer = int(raw_input(prompt))\n if not small <= answer <= large:\n print 'Integer must be from '+str(small)+' to '+str(large)+'.'\n except ValueError:\n print 'That is not a valid integer.'\n return answer",
"def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")",
"def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass",
"def input_loop(menu_range):\n def check(inp, rng):\n\n try:\n chk = int(inp)\n except ValueError:\n return False\n\n if chk in range(0, rng):\n return True\n else:\n return False\n\n print('-' * 20) # spacer\n\n inpu = input('choose option: ')\n\n while not check(inpu, menu_range):\n inpu = input('try again: ')\n\n return int(inpu)",
"def pedir_entero(msg, min, max):\n while True:\n n = str(raw_input(msg))\n if not n.isdigit() :\n show_msg(\"Oops! Parece que eso no era un numero entero\")\n continue\n n = int(n)\n if n <= max and n >= min :\n return n\n else:\n show_msg(\"Numero fuera de rango\")\n continue",
"def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. Try again.\")\n continue\n else:\n break\n\n return value",
"def get_positive_int(prompt):\n while True:\n n = get_int(prompt)\n if n > 0 and n < 9 :\n break\n return n",
"def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()",
"def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number",
"def enterInteger(CustomMessage=\"Please enter an integer: \",\r\n CustomErrorMessage=\"The input is not an integer, please try again...\",\r\n min=None, max=None):\r\n \r\n isInteger = False\r\n while not isInteger:\r\n try:\r\n number = int(input(CustomMessage))\r\n isInteger = True\r\n except ValueError:\r\n print(CustomErrorMessage)\r\n\r\n # range parameter\r\n if type(min) is int and type(max) is int:\r\n if min > max:\r\n raise ValueError(\"parameter 'min' is larger than 'max'\")\r\n else:\r\n while min > number or number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number within \"+str(min)+\" to \"+str(max)+\": \")\r\n elif type(min) is int:\r\n while min > number:\r\n number = enterInteger(CustomMessage=\"Please input a number larger than \" + str(min) + \": \")\r\n elif type(max) is int:\r\n while number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number smaller than \" + str(max) + \": \")\r\n\r\n return number"
]
| [
"0.78135586",
"0.7703191",
"0.76899767",
"0.76899767",
"0.7627158",
"0.7533919",
"0.7518586",
"0.75072557",
"0.74196607",
"0.7310433",
"0.7306025",
"0.72823185",
"0.725796",
"0.7250905",
"0.71397394",
"0.71391845",
"0.7110826",
"0.7087965",
"0.70310694",
"0.702148",
"0.7014467",
"0.699489",
"0.69850546",
"0.6941324",
"0.6923896",
"0.6911663",
"0.69072115",
"0.6903247",
"0.68924445",
"0.6889926"
]
| 0.81906676 | 0 |
Prints out logging options to the user | def logging_page(self):
print('-=' * 12 + " Logging Page " + '-=' * 12)
options = {1: self.sign_up, 2: self.log_in, 3: self.delete_account, 4: self.exit}
print_out = "(1) Sign up \n (2) Log in \n (3) Delete Account \n (4) Exit"
return self._take_option(options, print_out) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_options(self):\n for option in self._options.items():\n print \"{0} = {1}\".format(option[0], option[1])",
"def display_user_options():\r\n print(\"Please choose an option [e/d/a/q]:\")\r\n print(\" e) Encrypt some text\")\r\n print(\" d) Decrypt some text\")\r\n print(\" a) Automatically decrypt English text\")\r\n print(\" q) Quit\")",
"def help_opt(self):\n print(OPTIONS)",
"def printConfig():\n # Why not log instead? Are we asking user to confirm settings?\n pass # until implemented",
"def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')",
"def display_credentials():\n print(f\"GH_TOKEN: {GH_TOKEN}\")\n print(f\"USER: {GH_USER}\")",
"def printOptions():\n\n # For each group, create a group option\n print(\"default\")",
"def menu_eng(self):\n intro = \"Here are the options available for you to choose from\"\n option1 = \"[1] UNLOCK BY CREDENTIALS\"\n option2 = \"[2] UNLOCK BY QR CODE\"\n option3 = \"[3] UNLOCK WITH BLUETOOTH\"\n option4 = \"[4] BACK\"\n print(intro, option1, option2, option3, option4, sep='\\n')",
"def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")",
"def main_menu(self):\n welcome = \"\"\"\n ************************\n * WELCOME TO CARSHARE! *\n ************************\n \"\"\"\n intro = \"Are you a USER or an ENGINEER?\"\n option1 = \"[1] USER\"\n option2 = \"[2] ENGINEER\"\n print(welcome, intro, option1, option2, sep='\\n')",
"def print_config_option(args, run):\n print_config(run)\n print(\"-\" * 79)",
"def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display",
"def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"",
"def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')",
"def display_options(self):\n print()\n options = list(self.get_commands().values())\n options.sort(key=lambda op: int(op.name))\n\n for option in options:\n print(f'{\"%3d\" % int(option.name)}. {option.description}')",
"def print_options(self, opt):\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n # save to the disk\n expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n mkdirs(expr_dir)\n file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))\n with open(file_name, 'wt') as opt_file:\n opt_file.write(message)\n opt_file.write('\\n')",
"def otherOptionsFullScreen(self):\n\n # Set Storage List\n storageList = []\n # Create Intel explain menu\n menuDisplay = \"\"\"\n \\n\n [*] Information Verbose:\n Ontop of Asking for the Username and \n Password Should we Gather Even\n More Information about the User such as \n GEOIP / ISP / User Agent etc. etc. \n This Requires Curl to be installed or \n file_get_contents in PHP on selected Server \n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n # Set Verbose of Intel Gather\n self.results = input(\n \"\\nWould you like to Build a More In-depth Intel Report on Victim ( y Or n ): \")\n if self.results.lower()[0] == \"y\" or self.results.lower() == \"yes\":\n storageList.append(\"INTEL_VERBOSE_LOUD\")\n elif self.results.lower()[0] == \"n\" or self.results.lower() == \"no\":\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n else:\n # Anything Else lets just Hush it then\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n # Redirect Ask\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Redirect URL Which is the Same \n = URL of the Full-Screen Attack \n = you picked. For Instance If \n = it was AOL Full-Screen Attack\n = the default URL redirect would \n = be https://my.screenname.aol.com\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"After the Victim Inputs Info Where Should the Script Redirect?: \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"REDIRECT_DEFAULT\")\n else:\n # No Checking on URL Let Them Use Whatever lol there bad i guess\n # Append Default Redirect Naaaow\n storageList.append(self.results)\n\n # Spoof link\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the URL Link to be spoofed\n = to? This will be displayed when the user\n = rolls over the link. Basically tricking\n = them making them think they are going\n = to that URL..\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL be spoofed to? (ex: https://my.screenname.aol.com): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_SPOOF\")\n else:\n # Append specified spoof url now\n storageList.append(self.results)\n\n # link name\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the Actual URL name\n = to be?\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL name be? (ex: Aol Login): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_URL_NAME\")\n else:\n # Append url name\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = name of Index.php If you feel \n = the need to change the name please \n = do not add the actual extension .php \n = along with it only add whatever crazy \n = name you come up with\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What Should the Main Index PHP File Be Called? 
( ex: login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"INDEX_DEFAULT\")\n else:\n check = self.results.find(\".\")\n # if it doesn't return a -1 it found a decimal\n if check != -1:\n # Throw Error we found a dot\n self.errorOutput(\n \"[*] Error - Didn't We Say Not to Add an Extension, WOW...\", \"yellow\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Title of the Webpage.\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"blue\")\n self.results = input(\n \"What Should the Title of the Page be? (ex: AOL Login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"TITLE_DEFAULT\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n # Return Storage List for Processing\n return storageList",
"def print_menu():\r\n print(\"==============================================\")\r\n print(\"What do you want to do now? \")\r\n print(\"==============================================\")\r\n print(\"Available options:\")\r\n i = 1\r\n for a in available_actions:\r\n if current_state in a[\"valid_states\"]:\r\n # Only hint about the action if the current state allows it\r\n print(\" %i) %s\" % (i, a[\"description\"]))\r\n i += 1\r\n print()",
"def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)",
"def print_options(self, options, describe=False, indent_level=0):\n indent = ' ' * indent_level\n for option in options.get_option_names():\n line = colorize(option + ': ', 'green') + str(options[option])\n if describe:\n line += ' (' + options.get_description(option) + ')'\n values = options.get_acceptable_values(option)\n if values is not None:\n line += ' (Acceptable Values: ' + str(values) + ')'\n eprint(indent + line)",
"def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)",
"def print_menu():\r\n clear()\r\n print(\"Ratatouille Server\")\r\n print(\"---------------------------\")\r\n print(\"\")\r\n\r\n for (index, func) in MENU.items():\r\n print(\"%d - %s\" % (index, func.__name__))\r\n\r\n return raw_input(\"Choose an option: \").lstrip()",
"def show_user_info():\n \n vprint( 'Effective User :', os.geteuid())\n vprint( 'Effective Group :', os.getegid())\n vprint( 'Actual User :', os.getuid(), 'Login user:', os.getlogin())\n vprint( 'Actual Group :', os.getgid())\n vprint( 'Actual Groups :', os.getgroups())\n return",
"def print_help(self):\n self.format_epilog_called = False\n optparse.OptionParser.print_help(self)\n if not self.format_epilog_called:\n sys.stdout.write(self.on_format_epilog())",
"def display_user_info(user):\n\n ornament = '=' * 30\n print(ornament)\n print('LOGGED USER')\n print(ornament)\n print('Name: {}\\nSurname: {}\\nPermissions: {}'.format(user.name, user.surname, user.__class__.__name__))\n print(ornament + '\\n')",
"def display_menu_options(length):\r\n print('\\n***********************************************\\nVeuillez choisir une option entre 1 et', str(length))",
"def OutputMenuItems():\r\n print('''\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Save Data to File\r\n 4) Exit Program\r\n ''')\r\n print() # Add an extra line for looks\r",
"def print_auth_error(self):\n click.secho('Authentication error.', fg=self.clr_error)\n click.secho(('Update your credentials in ~/.gitsomeconfig '\n 'or run:\\n gh configure'),\n fg=self.clr_message)",
"def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n self._print_custom()\n\n # Display the options alphabetically.\n option_names = list(self.__options.keys())\n option_names.sort()\n for option in option_names:\n desc, command = self.__options[option]\n print(\"\\t%s: %s\" % (option, desc))",
"def info():\n print(\"\"\"\n Module for use in WMDframe. Just an interaction with the git\n repo changeme by ztgrace.\n\n \"Getting default credentials added to commercial scanners is\n often difficult and slow. changeme is designed to be simple\n to add new credentials without having to write any code or modules.\"\n\n Checkout the git repo: https://github.com/ztgrace/changeme\n \"\"\")\n # Delete the parser info, if args.parse is not used.\n if parser.format_help():\n print('\\n\\t' + bc.OKBLUE + 'COMMANDLINE ARGUMENTS:' + bc.ENDC)\n for line in parser.format_help().strip().splitlines():\n print('\\t' + line)\n print('')"
]
| [
"0.65579903",
"0.6431838",
"0.63489974",
"0.6298482",
"0.62360644",
"0.6181142",
"0.61544454",
"0.6136024",
"0.61304414",
"0.6068432",
"0.60009336",
"0.5979868",
"0.5954127",
"0.5939901",
"0.59330225",
"0.59311664",
"0.5922984",
"0.59142864",
"0.5908796",
"0.5897866",
"0.5891738",
"0.58633435",
"0.58082557",
"0.5803898",
"0.5792768",
"0.5785298",
"0.5782813",
"0.5775205",
"0.5753357",
"0.57498485"
]
| 0.7051415 | 0 |