query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Edit rules Edit the rules that process inbound events | def edit_rules():
my_rules = rules.get_all_rules()
my_rules.append(DEFAULT_RULE)
selected_rule_id = select(
label="Existing rules",
options=[{"label": rule["name"], "value": rule["id"]} for rule in my_rules],
)
# Rules have unique IDs from the database:
logging.info(f"selected_rule: {selected_rule_id}")
use_rule = [r for r in my_rules if r["id"] == int(selected_rule_id)][0]
updated_rule = input_group(
"Rule editing",
[
input(
"name", type=TEXT, name="name", value=use_rule["name"], required=True
), # Need ttextarea(
textarea(
"Rule names",
name="rule",
rows=10,
code={
"mode": "python", # code language
"theme": "darcula", # Codemirror theme. Visit https://codemirror.net/demo/theme.html#cobalt to get more themes
},
value=f"""{use_rule['rule']}\n""",
),
actions(
"actions",
[
# {"label": "test", "value": "test"},
{"label": "save", "value": "save"},
],
name="action",
help_text="Save",
),
],
)
if updated_rule is not None:
rl = dict(updated_rule)
if rl["action"] == "save":
rule_info = rules.save_rule(
rl["name"], rl["rule"], selected_rule_id
)
put_row(put_text("Rule"))
put_row(put_code(pprint.pformat(rule_info, indent=1)))
# Use webhook_info's ID to add/update the extractor
put_text(f"The rule added is: {updated_rule}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_rules():\n update_all_rules()\n return \"OK\"",
"def edit_ongoing_rule():\n rules = request.json['rules']\n now = datetime.datetime.now()\n\n for rule in rules:\n rule['line_id'] = int(rule['line_id'])\n rule['time'] = convert_to_datetime(rule['time'])\n rule['intervals'] = int(rule['intervals'])\n rule['time_wait'] = int(rule['time_wait'])\n rule['repeat_value'] = int(rule['repeat_value'])\n rule['date_start'] = convert_to_datetime(rule['date_start'])\n rule['time_start'] = convert_to_datetime(rule['time_start'])\n rule['date_time_start'] = datetime.datetime.combine(\n rule['date_start'], rule['time_start'].time())\n rule['end_date'] = convert_to_datetime(rule['end_date'])\n rule['rule_id'] = rule['rule_id']\n rule['days'] = -1\n\n if rule['date_start'].date() == rule['end_date'].date():\n date_delta = rule['end_date'].date() - now.date()\n if date_delta.days == 0:\n rule['days'] = 0\n if date_delta.days == 1:\n rule['days'] = 1\n\n # \"UPDATE ongoing_rules\n # SET line_id = {0}, time = {1}, intervals = {2}, time_wait = {3}, repeat_value={4}, date_time_start='{5}'\"\n # end_date = '{6}' WHERE rule_id = '{7}'\"\n database.update(database.QUERY[mn() + '_ongoing'].format(\n rule['line_id'], rule['time'], rule['intervals'], rule['time_wait'],\n rule['repeat_value'], rule['date_time_start'],\n rule['end_date'], rule['rule_id']))\n\n # update rules;\n update_rules_from_ongoing_rules(rule)\n # update_all_rules()\n logging.info(\"Ongoing rule modified. {0}\".format(str(rule)))\n\n send_ongoing_rule_message('edit_ongoing_rule', rule)\n\n return json.dumps({'status': 'OK'})",
"def edit_rule(self, rule_number, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tif len(self._blacklist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\told_rule = self._blacklist_rules.pop(rule_number)\n\t\t\tself._blacklist_rules.append(rule)\n\t\t\tself._log.info('Replaced rule from the blacklist rules set: \\n old: %s\\n new: %s' % (old_rule, rule))\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tif len(self._whitelist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\told_rule = self._whitelist_rules.pop(rule_number)\n\t\t\tself._whitelist_rules.append(rule)\n\t\t\tself._log.info('Replaced rule from the whitelist rules set: \\n old: %s\\n new: %s' % (old_rule, rule))\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()\n\t\treturn old_rule",
"def modifyRule(request):\n\t# We set up the logger and a few lists.\n\tlogger = logging.getLogger(__name__)\n\tresponse = []\n\tsids = []\n\truleSets = []\n\t\n\t# If the POST contains sids, we're processing rules.\n\tif request.POST.get('sids'):\n\t\tsids = json.loads(request.POST.get('sids'))\n\t# If the POST contains ruleset, we're processing rulesets.\n\tif request.POST.get('ruleset'):\n\t\truleSets = request.POST.getlist('ruleset')\n\t# Get the mode as well.\n\tmode = request.POST.get('mode')\n\t\n\t# We translate the mode into true or false.\n\tif mode == \"enable\":\n\t\tactive = True\n\telif mode == \"disable\":\n\t\tactive = False\n\telse:\n\t\tlogger.error(\"Invalid mode '\"+str(mode)+\"'. Rule(s) not modified.\")\n\t\tresponse.append({'response': 'invalidMode', 'text': 'Rule modification failed, invalid mode. \\nContact administrator.\\n\\n'})\n\t\treturn HttpResponse(json.dumps(response))\n\t\n\t# We only need to process rules if there are some in the list.\n\tif len(sids) == 0:\n\t\tresponse.append({'response': 'noSids'})\n\telse: \n\t\t# We use this list to return which rules got changed successfully.\n\t\tgoodsids = []\n\t\t# We iterate over the sids provided.\n\t\tfor sid in sids:\n\t\t\t# If we find the rule, we update its active flag to reflect the new status.\n\t\t\ttry:\n\t\t\t\tr = Rule.objects.filter(SID=sid).update(active=active)\n\t\t\t\tgoodsids.append({'sid': sid, 'mode': mode})\n\t\t\t\tlogger.info(\"Rule \"+str(r)+\" is now \"+str(mode)+\"d.\")\n\t\t\texcept Rule.DoesNotExist:\n\t\t\t\tresponse.append({'response': 'ruleDoesNotExist', 'text': 'Rule '+sid+' could not be found. \\nIt has not been modified.\\n\\n'})\n\t\t\t\tlogger.warning(\"Rule \"+str(sid)+\" could not be found.\")\n\t\t\t\t\n\t\tresponse.append({'response': 'ruleModificationSuccess', 'sids': goodsids})\n\t\t\n\t# We only need to process rulesets if there are some in the list.\n\tif len(ruleSets) == 0:\n\t\tresponse.append({'response': 'noSets'})\n\telse: \n\t\t# We use this list to return which rulesets got changed successfully.\n\t\tgoodRuleSets = []\n\t\t\n\t\t# Global is used to determine if the rulset is to be modified globally or per sensor.\n\t\tif request.POST.get('global'):\n\t\t\tglobalmodify = request.POST['global']\n\t\telse:\n\t\t\tglobalmodify = \"\"\n\t\t\t\n\t\t# If its global, we just change the active flag of the ruleset.\n\t\tif globalmodify == \"on\":\n\t\t\tfor ruleSet in ruleSets:\n\t\t\t\ttry:\n\t\t\t\t\tr = RuleSet.objects.filter(id=ruleSet).update(active=active)\n\t\t\t\t\tgoodRuleSets.append({'set': ruleSet, 'mode': mode})\n\t\t\t\t\tlogger.info(\"RuleSet \"+str(r)+\" is now \"+str(mode)+\"d.\")\n\t\t\t\texcept RuleSet.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleSetDoesNotExist', 'text': 'RuleSet '+ruleSet+' could not be found. \\nIt has not been modified.\\n\\n'})\n\t\t\t\t\tlogger.warning(\"RuleSet \"+str(ruleSet)+\" could not be found.\")\n\t\t\t\t\t\n\t\t\tresponse.append({'response': 'ruleSetModificationSuccess', 'sets': goodRuleSets})\n\t\t\t\n\t\t# If its not global, we have to iterate over all the sensors provided and add/remove the rulesets.\n\t\telse:\n\t\t\tsensors = request.POST.getlist('sensors')\n\t\t\t# If we didnt pick all sensors, we gotta iterate over all the ones we selected. 
\n\t\t\tsensorList = []\n\t\t\tallSensor = False\n\t\t\tfor sensor in sensors:\n\t\t\t\ttry:\n\t\t\t\t\ts = Sensor.objects.get(id=sensor)\n\t\t\t\t\t\n\t\t\t\t\tif s.name == \"All\":\n\t\t\t\t\t\tsensorList = [s]\n\t\t\t\t\t\tallSensor = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsensorList.append(s)\n\t\t\t\texcept Sensor.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'sensorDoesNotExist', 'text': 'Sensor with DB ID '+sensor+' does not exist.'})\n\t\t\t\t\tlogger.warning(\"Sensor \"+str(sensor)+\" could not be found.\")\n\t\t\t\t\n\t\t\tfor ruleSet in ruleSets:\n\t\t\t\ttry:\n\t\t\t\t\tr = RuleSet.objects.get(id=ruleSet)\n\t\t\t\t\t\n\t\t\t\t\tif \"All\" in r.sensors.values_list('name', flat=True):\n\t\t\t\t\t\tallInSet = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tallInSet = False\n\t\t\t\t\t\t\n\t\t\t\t\tif r.sensors.count():\n\t\t\t\t\t\tsetHasSensors = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tsetHasSensors = False\n\t\t\t\t\t\t\n\t\t\t\t\tif active:\n\t\t\t\t\t\tif allSensor and setHasSensors and not allInSet:\n\t\t\t\t\t\t\tr.sensors.clear\n\t\t\t\t\t\t\tr.sensors.add(*sensorList) # This is where the ruleset is tied to the sensor.\n\t\t\t\t\t\telif allSensor and allInSet:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr.sensors.add(*sensorList) # This is where the ruleset is tied to the sensor.\n\t\t\t\t\telse:\n\t\t\t\t\t\tif allSensor and setHasSensors:\n\t\t\t\t\t\t\tr.sensors.clear()\n\t\t\t\t\t\telif not allSensor and allInSet:\n\t\t\t\t\t\t\tr.sensors.clear()\n\t\t\t\t\t\t\ts = Sensor.objects.exclude(name=\"All\").all()\n\t\t\t\t\t\t\tr.sensors.add(*s)\n\t\t\t\t\t\t\tr.sensors.remove(*sensorList) # This is where the ruleset is removed from the sensor.\n\t\t\t\t\t\telif (allSensor and allInSet) or not setHasSensors:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr.sensors.remove(*sensorList) # This is where the ruleset is removed from the sensor.\n\t\t\t\t\t\t\n\t\t\t\t\tgoodRuleSets.append({'set': ruleSet, 'mode': mode, 'sensor': sensor})\n\t\t\t\t\tlogger.info(\"RuleSet \"+str(r)+\" is now \"+str(mode)+\"d on sensor \"+str(s)+\".\")\n\t\t\t\texcept RuleSet.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleSetDoesNotExist', 'text': 'RuleSet '+ruleSet+' could not be found. \\nIt has not been modified.\\n\\n'})\n\t\t\t\t\tlogger.warning(\"RuleSet \"+str(ruleSet)+\" could not be found.\")\n\t\t\t\t\n\t\t\t\t\t\n\t\t\tresponse.append({'response': 'ruleSetModificationSuccess', 'sets': goodRuleSets})\n\t\n\treturn HttpResponse(json.dumps(response))",
"def _set_rules_admin(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)",
"def edit_standard_fwl_rules(self, firewall_id, rules):\r\n rule_svc = self.client['Network_Firewall_Update_Request']\r\n template = {\r\n \"networkComponentFirewallId\": firewall_id,\r\n \"rules\": rules}\r\n\r\n return rule_svc.createObject(template)",
"def update_rules(self: object,\n body: dict,\n cs_username: str = None # pylint: disable=W0613 # cs_username is deprecated\n ) -> dict:\n # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rules\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"update_rules\",\n body=body\n )",
"def _UpdateAclRule(self, entry):\n\n print 'Update Acl rule: %s' % (entry.GetEditLink().href)\n roleValue = \"http://schemas.google.com/gCal/2005#%s\" % (\"read\")\n entry.role = gdata.acl.data.AclRole(value=roleValue)\n returned_rule = self.cal_client.Update(entry)",
"def test_edit_rule(self):\n pass",
"def put(self, request, *args, **kwargs):\n try:\n new_rule = json.loads(request.body)\n except Exception as e:\n return error('unable to marshal json', str(e))\n try:\n validate_rule_json(new_rule)\n except RuleValidationException as e:\n return error('error validating json', str(e))\n rule = Rule()\n rule.populate(new_rule)\n rule.save()\n return success(rule.summary())",
"def add_rule(self, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tself._log.info('Adding new rule to the blacklist rules set: %s' % rule)\n\t\t\tself._blacklist_rules.append(rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tself._log.info('Adding new rule to the whitelist rules set: %s' % rule)\n\t\t\tself._whitelist_rules.append(rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()",
"def edit_dedicated_fwl_rules(self, firewall_id, rules):\r\n mask = ('mask[networkVlan[firewallInterfaces'\r\n '[firewallContextAccessControlLists]]]')\r\n svc = self.client['Network_Vlan_Firewall']\r\n fwl = svc.getObject(id=firewall_id, mask=mask)\r\n network_vlan = fwl['networkVlan']\r\n\r\n for fwl1 in network_vlan['firewallInterfaces']:\r\n if fwl1['name'] == 'inside':\r\n continue\r\n for control_list in fwl1['firewallContextAccessControlLists']:\r\n if control_list['direction'] == 'out':\r\n continue\r\n fwl_ctx_acl_id = control_list['id']\r\n\r\n template = {\r\n 'firewallContextAccessControlListId': fwl_ctx_acl_id,\r\n 'rules': rules\r\n }\r\n\r\n svc = self.client['Network_Firewall_Update_Request']\r\n return svc.createObject(template)",
"def edit_rule(self, value, new=False):\n\n if value >= 0 or new:\n if new:\n name = None\n rule = {}\n else:\n name = self.keys[value]\n rule = self.rules[value]\n text = '\"\"\"\\nIf you don\\'t need a setting, just leave it as None.\\n'\n text += 'When the rule is parsed, the default will be used.\\n'\n text += 'Each variable is evaluated separately, so you cannot substitute variables '\n text += 'in other variables.\\n\"\"\"\\n'\n text += '\\n# name (str): Rule name. Required.\\n'\n text += self.format_string('name', name)\n text += '\\n# find (str): Regular expression pattern or literal string.\\n'\n text += '# Use (?i) for case insensitive. Use (?s) for dotall.\\n'\n text += '# See https://docs.python.org/3.4/library/re.html for more info on regex flags.\\n'\n text += '# Required unless \"scope\" is defined.\\n'\n text += self.format_regex_string('find', rule.get('find'))\n text += '\\n# replace (str - default=r\\'\\\\g<0>\\'): Replace pattern.\\n'\n text += self.format_regex_string('replace', rule.get('replace'))\n text += '\\n# literal (bool - default=False): Preform a non-regex, literal search and replace.\\n'\n text += self.format_bool('literal', rule.get('literal'))\n text += '\\n# literal_ignorecase (bool - default=False): Ignore case when \"literal\" is true.\\n'\n text += self.format_bool('literal_ignorecase', rule.get('literal_ignorecase'))\n text += '\\n# scope (str): Scope to search for and to apply optional regex to.\\n'\n text += '# Required unless \"find\" is defined.\\n'\n text += self.format_string('scope', rule.get('scope'))\n text += '\\n# scope_filter ([str] - default=[]): An array of scope qualifiers for the match.\\n'\n text += '# Only used when \"scope\" is not defined.\\n'\n text += '#\\n'\n text += '# - Any instance of scope qualifies match: scope.name\\n'\n text += '# - Entire match of scope qualifies match: !scope.name\\n'\n text += '# - Any instance of scope disqualifies match: -scope.name\\n'\n text += '# - Entire match of scope disqualifies match: -!scope.name\\n'\n text += self.format_array('scope_filter', rule.get('scope_filter'))\n text += '\\n# greedy (bool - default=True): Apply action to all instances (find all).\\n'\n text += '# Used when \"find\" is defined.\\n'\n text += self.format_bool('greedy', rule.get('greedy'))\n text += '\\n# greedy_scope (bool - default=True): Find all the scopes specified by \"scope.\"\\n'\n text += self.format_bool('greedy_scope', rule.get('greedy_scope'))\n text += '\\n# format_replace (bool - default=False): Use format string style replace templates.\\n'\n text += '# Works only for Regex (with and without Backrefs) and Re (with Backrefs).\\n'\n text += '# See https://facelessuser.github.io/backrefs/usage/#format-replacements for more info.\\n'\n text += self.format_bool('format_replace', rule.get('format_replace'))\n text += '\\n# selection_inputs (bool -default=False): Use selection for inputs into find pattern.\\n'\n text += '# Global setting \"selection_only\" must be disabled for this to work.\\n'\n text += self.format_bool('selection_inputs', rule.get('selection_inputs'))\n text += '\\n# multi_pass (bool - default=False): Perform multiple sweeps on the scope region to find\\n'\n text += '# and replace all instances of the regex when regex cannot be formatted to find\\n'\n text += '# all instances. 
Since a replace can change a scope, this can be useful.\\n'\n text += self.format_bool('multi_pass', rule.get('multi_pass'))\n text += '\\n# plugin (str): Define replace plugin for more advanced replace logic.\\n'\n text += self.format_string('plugin', rule.get('plugin'))\n text += '\\n# args (dict): Arguments for \\'plugin\\'.\\n'\n text += self.format_dict('args', rule.get('args'))\n text += '\\n# ----------------------------------------------------------------------------------------\\n'\n text += '# test: Here you can setup a test command. This is not saved and is just used for this session.\\n'\n text += '# - replacements ([str]): A list of regex rules to sequence together.\\n'\n text += '# - find_only (bool): Highlight current find results and prompt for action.\\n'\n text += '# - action (str): Apply the given action (fold|unfold|mark|unmark|select).\\n'\n text += '# This overrides the default replace action.\\n'\n text += '# - options (dict): optional parameters for actions (see documentation for more info).\\n'\n text += '# - key (str): Unique name for highlighted region.\\n'\n text += '# - scope (str - default=\"invalid\"): Scope name to use as the color.\\n'\n text += '# - style (str - default=\"outline\"): Highlight style (solid|underline|outline).\\n'\n text += '# - multi_pass (bool): Repeatedly sweep with sequence to find all instances.\\n'\n text += '# - no_selection (bool): Overrides the \"selection_only\" setting and forces no selections.\\n'\n text += '# - regex_full_file_with_selections (bool): Apply regex search to full file then apply\\n'\n text += '# action to results under selections.\\n'\n text += textwrap.dedent(\n \"\"\"\\\n test = {\n \"replacements\": [%s],\n \"find_only\": True,\n \"action\": None,\n \"options\": {},\n \"multi_pass\": False,\n \"no_selection\": False,\n \"regex_full_file_with_selections\": False\n }\n \"\"\" % (self.simple_format_string(name) if name is not None else '')\n )\n\n replace_view = self.window.create_output_panel('reg_replace')\n replace_view.run_command('reg_replace_panel_insert', {'text': text})\n for ext in ST_LANGUAGES:\n highlighter = sublime.load_settings(\n 'reg_replace.sublime-settings'\n ).get('python_highlighter', 'Python/Python')\n highlighter = 'Packages/' + highlighter + ext\n try:\n sublime.load_resource(highlighter)\n replace_view.set_syntax_file(highlighter)\n break\n except Exception:\n pass\n replace_view.settings().set('gutter', True)\n replace_view.settings().set('line_numbers', True)\n replace_view.settings().set('reg_replace.edit_view', True)\n replace_view.settings().set('bracket_highlighter.bracket_string_escape_mode', 'regex')\n replace_view.settings().set('regreplace.name', name)\n replace_view.sel().clear()\n replace_view.sel().add(sublime.Region(0, 0))\n self.window.run_command(\"show_panel\", {\"panel\": \"output.reg_replace\"})\n sublime.set_timeout(lambda w=self.window, v=replace_view: w.focus_view(v), 100)",
"def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)",
"def set_rules(rules, overwrite=True, use_conf=False):\n\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)",
"def _set_rules_mgmt(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # add rule to allow DHCP requests (dhcp-offer have src addr == 0.0.0.0)\n # worker/storage nodes request IP dynamically\n rule = self._get_dhcp_rule(host.personality, \"UDP\", ip_version)\n gnp_config[\"spec\"][\"ingress\"].append(rule)\n\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)",
"def process_IN_MODIFY(self, event):",
"def put(self, request, l7_rule_id, l7_policy_id):\n kwargs = {'l7_rule_id': l7_rule_id, 'l7_policy_id': l7_policy_id}\n update_l7_rule(request, **kwargs)",
"async def set_rules(self, ctx: discord.ext.commands.context.Context, *, rules: str):\n guild_info = server_setup.get_guild_info(ctx.guild)\n\n if guild_info[\"rulesChannelID\"] is not None:\n rules_channel = server_setup.get_channel(guild=ctx.guild, channel_id=guild_info[\"rulesChannelID\"])\n embed = await format_rules(rules=rules, title=\"Rules\",\n description=\"You must follow these rules at all times\")\n\n if guild_info[\"rulesMessageID\"] is not None:\n message = await rules_channel.fetch_message(guild_info[\"rulesMessageID\"])\n\n await message.edit(embed=embed)\n\n else:\n message = await rules_channel.send(embed=embed)\n guild_info[\"rulesMessageID\"] = message.id\n\n server_setup.update_guild(guild_info=guild_info)\n\n guild_info[\"rules\"] = rules\n server_setup.update_guild(guild_info=guild_info)\n\n else:\n await ctx.send(\"You must create a rules channel before you may set the rules message.\")\n\n print(\"Rules have been updated.\")",
"def test_update_rule(self):\n pass",
"def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)",
"def handle_put(self, request, user, *args, **kwargs):\n try:\n self.log.info('Update rule to an environment')\n\n # User permission\n if not has_perm(user, AdminPermission.VIP_VALIDATION, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n raise UserNotAuthorizedError(None)\n\n # Load XML data\n xml_map, attrs_map = loads(request.raw_post_data)\n\n # XML data format\n networkapi_map = xml_map.get('networkapi')\n if networkapi_map is None:\n return self.response_error(3, u'There is no value to the networkapi tag of XML request.')\n\n rule_map = networkapi_map.get('map')\n if rule_map is None:\n return self.response_error(3, u'There is no value to the environment_vip tag of XML request.')\n\n # Get XML data\n id_rule = rule_map['id_rule']\n id_env = rule_map['id_env']\n name = rule_map['name']\n contents = rule_map['contents'] if type(\n rule_map['contents']) is list else [rule_map['contents'], ]\n blocks_id = rule_map['blocks_id'] if type(\n rule_map['blocks_id']) is list else [rule_map['blocks_id'], ]\n\n if not is_valid_int_greater_zero_param(id_rule):\n self.log.error(\n u'The id_rule parameter is not a valid value: %s.', id_rule)\n raise InvalidValueError(None, 'id_env', id_rule)\n\n if not is_valid_int_greater_zero_param(id_env):\n self.log.error(\n u'The id_env parameter is not a valid value: %s.', id_env)\n raise InvalidValueError(None, 'id_env', id_env)\n\n if not name or len(name) > 80:\n self.log.error(\n u'The name parameter is not a valid value: %s.', name)\n raise InvalidValueError(None, 'name', name)\n\n rule = Rule.objects.get(pk=id_rule)\n\n environment = Ambiente.get_by_pk(id_env)\n\n rule.name = name\n rule.environment = environment\n\n # Set NULL in rule field of all Vip Request related\n RequisicaoVips.objects.filter(rule=rule).update(rule=None)\n RequisicaoVips.objects.filter(\n rule_applied=rule).update(rule_applied=None)\n RequisicaoVips.objects.filter(\n rule_rollback=rule).update(rule_rollback=None)\n\n rule.save()\n\n for rule_cotent in rule.rulecontent_set.all():\n rule_cotent.delete()\n\n self.__save_rule_contents(\n contents, blocks_id, environment, rule, user)\n\n return self.response(dumps_networkapi({}))\n\n except AmbienteNotFoundError, e:\n self.log.error('Environment not found')\n return self.response_error(112)\n except InvalidValueError, e:\n return self.response_error(269, e.param, e.value)\n except Rule.DoesNotExist:\n return self.response_error(358)\n except BlockRules.DoesNotExist:\n return self.response_error(359)\n except UserNotAuthorizedError:\n return self.not_authorized()\n except XMLError, x:\n self.log.error(u'Error reading the XML request.')\n return self.response_error(3, x)\n except Exception, e:\n return self.response_error(1)",
"def add_rules(self, rules):\n self.model_sort.handler_block(self.row_reordered_signal)\n i = len(self.model)\n format_protocol_int = lambda s: 'ip' if not s else '\\n'.join(map(Operator.to_string, s))\n format_protocol = lambda s, n: '\\n'.join(set(n)) if n else format_protocol_int(s)\n format_int = lambda s: \"any\" if len(s) == 0 else '\\n'.join(map(Operator.to_string, s))\n format = lambda s, n: '\\n'.join(set(n)) if n else format_int(s)\n for r in rules:\n self.model_sort.get_model().append([r.identifier,\n r.name,\n format_protocol(r.protocol, r.protocol_name),\n format(r.ip_source, r.ip_source_name),\n format(r.port_source, r.port_source_name),\n format(r.ip_dest, r.ip_dest_name),\n format(r.port_dest, r.port_dest_name),\n r.action.to_string(),\n r.action.get_action_color(),\n '#FFFFFF' if i % 2 == 0 else '#DCDCDC'])\n i += 1\n self.model_sort.handler_unblock(self.row_reordered_signal)",
"def edit_ruleset(command):\n namespace = app.main(command)\n assert namespace.command == 'er' or namespace.command == \"editruleset\"\n assert namespace.name == \"test\"\n assert namespace.action in ['a','d']",
"def update_rules_from_ongoing_rules(rule):\n database.update(database.QUERY[mn() + '_remove_from_life'].format(rule['rule_id']))\n\n _delta = rule['end_date'] - rule['date_time_start']\n _days = _delta.days + 1\n logging.info(\"number of days: {0}\".format(_days))\n\n ongoing_rule_id = rule['rule_id']\n\n for days_to_add in range(0, _days + 1, rule['repeat_value']):\n date_datetime = rule['date_time_start'] + datetime.timedelta(days=days_to_add)\n\n # start_time = rule['date_time_start']\n branch_id = int(rule['line_id'])\n time_min = int(rule['time'])\n time_wait = int(rule['time_wait'])\n num_of_intervals = int(rule['intervals'])\n interval_id = str(uuid.uuid4())\n\n stop_datetime = date_datetime + datetime.timedelta(minutes=time_min)\n\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, START_RULE, ENABLED_RULE,\n date_datetime.date(), date_datetime,\n interval_id, time_min, ongoing_rule_id))\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, STOP_RULE, ENABLED_RULE,\n date_datetime.date(), stop_datetime,\n interval_id, 0, ongoing_rule_id))\n\n logging.info(\"Start time: {0}. Stop time: {1} added to database\".format(str(date_datetime), str(stop_datetime)))\n\n # first interval is executed\n for x in range(2, num_of_intervals + 1):\n date_datetime = stop_datetime + datetime.timedelta(minutes=time_wait)\n stop_datetime = date_datetime + datetime.timedelta(minutes=time_min)\n\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, START_RULE, ENABLED_RULE,\n date_datetime.date(), date_datetime,\n interval_id, time_min, ongoing_rule_id))\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, STOP_RULE, ENABLED_RULE,\n date_datetime.date(), stop_datetime,\n interval_id, 0, ongoing_rule_id))\n\n logging.info(\"Start time: {0}. Stop time: {1} added to database\".format(str(date_datetime), str(stop_datetime)))",
"def post(self, request, *args, **kwargs):\n rule = self.get_object()\n try:\n updates = json.loads(request.body.decode('utf-8'))\n except Exception as e:\n return error('unable to marshal json', str(e))\n try:\n validate_rule_json(updates)\n except RuleValidationException as e:\n return error('error validating json', str(e))\n\n # TODO this can take place in the save method on Rule, which would also\n # cover creation and deletion.\n change = RuleChange(\n rule=rule,\n change_user=updates['user'],\n change_comment=updates['comment'])\n change.populate(rule.full_values())\n if rule.enabled and not updates['enabled']:\n change.change_type = 'd'\n else:\n change.change_type = 'u'\n change.save()\n rule.populate(updates)\n rule.save()\n return success({\n 'rule': rule.summary(),\n 'change': change.summary(),\n })",
"def _set_rules_subcloud_admin(self, gnp_config, network, host_personality):\n\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n ICMP = \"ICMP\"\n if ip_version == 6:\n ICMP = \"ICMPv6\"\n\n rules = list()\n for proto in [\"TCP\", \"UDP\", ICMP]:\n rule = {\"metadata\": dict()}\n rule[\"metadata\"] = {\"annotations\": dict()}\n rule[\"metadata\"][\"annotations\"] = {\"name\":\n f\"stx-ingr-{host_personality}-subcloud-{proto.lower()}{ip_version}\"}\n rule.update({\"protocol\": proto})\n rule.update({\"ipVersion\": ip_version})\n rule.update({\"action\": \"Allow\"})\n if (proto == \"TCP\"):\n rule.update({\"destination\": {\"ports\": self._get_subcloud_tcp_ports()}})\n elif (proto == \"UDP\"):\n rule.update({\"destination\": {\"ports\": self._get_subcloud_udp_ports()}})\n rules.append(rule)\n\n networks = self._get_routes_networks(network.type)\n for network in networks:\n self._add_source_net_filter(rules, network)\n\n for rule in rules:\n gnp_config[\"spec\"][\"ingress\"].append(rule)",
"def on_set_rule(self) -> None:\r\n\r\n self.stop_animation()\r\n self.master.focus() # Move the cursor away from the rule entry\r\n rule_text = str(self.rule_entry.get())\r\n\r\n if not self.rule.try_set_rule(rule_text):\r\n messagebox.showinfo(message = self.INVALID_RULE_MESSAGE)\r\n return\r\n\r\n self.rule_name.configure(text = rule_text)\r\n\r\n self.board.birth_rule = self.rule.birth_rule\r\n self.board.remain_rule = self.rule.remain_rule\r\n self.anim_board.birth_rule = self.rule.birth_rule\r\n self.anim_board.remain_rule = self.rule.remain_rule",
"def update_acc_by_rules(self) -> None:\n for rule, coeff in self.rules.items():\n acc_delta = rule(self) # can't call self.rule\n self.update_acc(acc_delta, coeff)",
"def ModifySecurityRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifySecurityRule\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifySecurityRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))"
]
| [
"0.68029857",
"0.6289704",
"0.61724335",
"0.60976857",
"0.608838",
"0.60482955",
"0.60210454",
"0.60100615",
"0.5890595",
"0.58173704",
"0.57617265",
"0.57586634",
"0.5667582",
"0.55736524",
"0.5569121",
"0.5536796",
"0.5503093",
"0.5500226",
"0.5476203",
"0.546313",
"0.5461089",
"0.54506946",
"0.5437657",
"0.54308605",
"0.53699803",
"0.53554153",
"0.5314423",
"0.5298645",
"0.52819246",
"0.52804357"
]
| 0.6884661 | 0 |
Start as many threads as ordered (provided by variable self.aOT). Also stores all threads in self.threads, so they can be stopped later as well. | def startWorkers(self):
for i in range(self.aOT):
t = thr.Thread(target=self.threadWorker)
t.start()
self.threads.append(t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_threads(self):\r\n assert len(self.all_threads) > 0\r\n for thread in self.all_threads:\r\n thread.start()",
"def start_workers(self):\n\n for thread in self.threads:\n thread.start()",
"def create_and_start_threads(self):\r\n self.create_threads()\r\n self.start_threads()",
"def start_threads(self, sess, n_threads=1):\n self.threads = []\n for n in range(n_threads):\n t = threading.Thread(target=self.thread_main, args=(sess,))\n t.daemon = True # thread will close when parent quits\n t.start()\n self.threads.append(t)",
"def start_threads(self, sess, n_threads=1):\n self.threads = []\n for n in range(n_threads):\n t = threading.Thread(target=self.thread_main, args=(sess,))\n t.daemon = True # thread will close when parent quits\n t.start()\n self.threads.append(t)",
"def run_in_parallel(self):\n\t\tfor p in self.parallel_threads:\n\t\t\tp.start()\n\t\tfor p in self.parallel_threads:\n\t\t\tp.join()",
"def start_threads(self, sess, n_threads=4):\n threads = []\n print(\"starting %d data threads for training\" % n_threads)\n for n in range(n_threads):\n t = threading.Thread(target=self.thread_main, args=(sess,0,))\n t.daemon = True # thread will close when parent quits\n t.start()\n threads.append(t)\n # Make sure the queueu is filled with some examples (n = 500)\n num_samples_in_queue = 0\n while num_samples_in_queue < self.capacityTrain:\n num_samples_in_queue = sess.run(self.size_op)\n print(\"Initializing queue, current size = %i/%i\" % (num_samples_in_queue, self.capacityTrain))\n time.sleep(2)\n return threads",
"def _start_threads(self, target, n):\n threads = [threading.Thread(target=target) for i in range(n)]\n [t.start() for t in threads]\n [t.join() for t in threads]",
"def _start_threads(self, target, n):\n threads = [threading.Thread(target=target) for i in range(n)]\n [t.start() for t in threads]\n [t.join() for t in threads]",
"def create_threads(self):\r\n name = self.short_name\r\n self.all_threads = []\r\n tf.train.add_queue_runner(tf.train.QueueRunner(self._preprocess_queue, [self._enqueue_op] * 2))\r\n\r\n def _create_and_register_thread(*args, **kwargs):\r\n thread = threading.Thread(*args, **kwargs)\r\n thread.daemon = True\r\n self.all_threads.append(thread)\r\n\r\n for i in range(self.num_threads):\r\n # File read thread\r\n _create_and_register_thread(target=self.read_entry_job, name='fread_%s_%d' % (name, i))\r\n\r\n # Preprocess thread\r\n _create_and_register_thread(target=self.preprocess_job,\r\n name='preprocess_%s_%d' % (name, i))\r\n\r\n if self.staging:\r\n # Send-to-GPU thread\r\n _create_and_register_thread(target=self.transfer_to_gpu_job,\r\n name='transfer_%s_%d' % (name, i))",
"def setup_worker_threads(self):\n \n for thread_number in range(0, self.max_workers):\n worker = DeviceWorker(self, thread_number)\n self.worker_threads.append(worker)\n worker.start()",
"def run_threads(self, threads):\n\n for t, daemon in threads:\n t.daemon = daemon\n self.processes.append(t)\n t.start()\n\n for t in self.processes:\n t.join()",
"def startall(self, wait=False, **kwdargs):\n self.logger.debug(\"startall called\")\n with self.regcond:\n while self.status != 'down':\n if self.status in ('start', 'up') or self.ev_quit.is_set():\n # For now, abandon additional request to start\n self.logger.error(\"ignoring duplicate request to start thread pool\")\n return\n\n self.logger.debug(\"waiting for threads: count=%d\" %\n self.runningcount)\n self.regcond.wait()\n\n #assert(self.status == 'down')\n if self.ev_quit.is_set():\n return\n\n self.runningcount = 0\n self.status = 'start'\n self.workers = []\n if wait:\n tpool = self\n else:\n tpool = None\n\n # Start all worker threads\n self.logger.debug(\"starting threads in thread pool\")\n for i in range(self.numthreads):\n t = self.workerClass(self.queue, logger=self.logger,\n ev_quit=self.ev_quit, tpool=tpool,\n **kwdargs)\n self.workers.append(t)\n t.start()\n\n # if started with wait=True, then expect that threads will register\n # themselves and last one up will set status to \"up\"\n if wait:\n # Threads are on the way up. Wait until last one starts.\n while self.status != 'up' and not self.ev_quit.is_set():\n self.logger.debug(\"waiting for threads: count=%d\" %\n self.runningcount)\n self.regcond.wait()\n else:\n # otherwise, we just assume the pool is up\n self.status = 'up'\n self.logger.debug(\"startall done\")",
"def set_threads(self):\r\n # pump thread should start after the loop has started\r\n for device in self._model.measurement.children():\r\n if device.type_info == MOTOR_CONTROLLER:\r\n thread = Worker(device.update_duty_cycles)\r\n thread.setName('MotorControllerThread')\r\n thread.signals.message.connect(self.message)\r\n thread.signals.finished.connect(self.display_finished_thread)\r\n self._threads.append(thread)\r\n if device.type_info == LABJACK:\r\n # reading must start after processing\r\n thread = Worker(device.read_stream_data)\r\n thread.setName('StreamReaderThread-{}'.format(device.name))\r\n thread.signals.message.connect(self.message)\r\n thread.signals.finished.connect(self.display_finished_thread)\r\n thread.signals.result.connect(self.display_result)\r\n self._threads.append(thread)",
"def _init_threads(self):\n\n startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))\n self.threads.append(startTh)\n\n sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))\n self.threads.append(sendTh)",
"def main(order_count):\n for id in range(MAX_ORDERS):\n while active_count() > MAX_QUEUE:\n print(\"..All permitted threads running: waiting\")\n sleep(LOOP_TIMEOUT)\n print(\"..Finished waiting\")\n o = Thread(target=order_gen, kwargs={\"id\": id})\n o.start()",
"def Threads():\n for i in range(0, idc.get_thread_qty()):\n yield idc.getn_thread(i)",
"def create_workers(self, threads_count):\n\n for _ in xrange(threads_count):\n new_thread = Thread(target=self.execute)\n self.threads.append(new_thread)",
"def _launch_threads():\n from . import workqueue as lib\n from ctypes import CFUNCTYPE, c_int\n\n launch_threads = CFUNCTYPE(None, c_int)(lib.launch_threads)\n launch_threads(NUM_CPU)",
"def _init_threads(self):\n\n self._init_hashers()\n self._queues = {}\n self._threads = {}\n\n for algo in self.algos:\n t = Thread(target=self._queue_updater, args=(algo,), name=algo)\n self._queues[algo] = Queue(MtHasher.QUEUE_SIZE)\n self._threads[algo] = t\n t.start()",
"def invoke_all_and_wait(self):\n list_promise = []\n for thread in self.__list_thread:\n thread.start()\n list_promise.append(thread)\n for process in list_promise: process.join()",
"def start(self, nb_threads):\r\n # type: (int) -> None\r\n if self._active_threads:\r\n raise Exception('Threads already started.')\r\n\r\n # Create thread pool\r\n for _ in range(nb_threads):\r\n worker = threading.Thread(\r\n target=_work_function,\r\n args=(self._job_q, self._result_q, self._error_q))\r\n worker.start()\r\n self._thread_list.append(worker)\r\n self._active_threads += 1\r\n\r\n # Put sentinels to let the threads know when there's no more jobs\r\n [self._job_q.put(_ThreadPoolSentinel()) for _ in self._thread_list]",
"def _launch_threads(click_context: click.Context, agents: List[Path]) -> int:\n aeas = [] # type: List[AEA]\n for agent_directory in agents:\n with cd(agent_directory):\n aeas.append(AEABuilder.from_aea_project(\".\").build())\n\n threads = [Thread(target=agent.start) for agent in aeas]\n for t in threads:\n t.start()\n\n try:\n while sum([t.is_alive() for t in threads]) != 0:\n # exit when all threads are not alive.\n # done to avoid block on joins\n for t in threads:\n t.join(0.1)\n\n except KeyboardInterrupt:\n logger.info(\"Keyboard interrupt detected.\")\n finally:\n for idx, agent in enumerate(aeas):\n if not agent.liveness.is_stopped:\n agent.stop()\n threads[idx].join()\n logger.info(\"Agent {} has been stopped.\".format(agent.name))\n return 0",
"def __init__(self, threads_count):\n\n self.queue = Queue(threads_count)\n\n self.threads = []\n self.device = None\n\n self.create_workers(threads_count)\n self.start_workers()",
"def threadsInBatches_run(l_threadAnalysis):\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)",
"def start_workers(self, test_runner):\n self._test_runner = test_runner\n for worker_number in xrange(self._num_workers):\n worker = _WorkerState('worker-%d' % worker_number)\n worker.thread = self._start_worker(worker_number, worker.name)\n self._workers[worker.name] = worker\n return self._threads()",
"def _setup_workers(self, num_workers):\n self.pool = []\n\n for _ in range(num_workers):\n self.pool.append(Thread(target=self.threadloop))\n\n for a_thread in self.pool:\n a_thread.setDaemon(True)\n a_thread.start()",
"def joinAllThreads(self):\n\n if self.fitAsync:\n with self.threadListLock:\n for thread in self.threadList:\n thread.join()\n else:\n return",
"def start_threads(count):\n for i in range(count):\n threading.Thread(target=send_pulses, args=(i,)).start()",
"def __init__(self, numthreads):\n self.queue = Queue.Queue()\n for _ in range(numthreads):\n Worker(self.queue)\n logger.debug(\"Event worker pool started with %s threads.\" % numthreads)"
]
| [
"0.7633587",
"0.73176336",
"0.72925687",
"0.68801624",
"0.68801624",
"0.6800484",
"0.67321104",
"0.6644174",
"0.6644174",
"0.6593679",
"0.65704525",
"0.63864803",
"0.6262484",
"0.62089807",
"0.6177471",
"0.6156482",
"0.61107695",
"0.6069611",
"0.6052761",
"0.60488874",
"0.60299224",
"0.60040003",
"0.59611565",
"0.5935604",
"0.59000206",
"0.5890652",
"0.5872135",
"0.5844151",
"0.5823726",
"0.581634"
]
| 0.793661 | 0 |
Stem all the words in a given question. | def stemQuestion(self, question):
stemmedquestion = []
for word in str(question).split():
if word not in self.wordsFilter:
stemmedquestion.append(stem(word.lower()))
return stemmedquestion | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stem_words(self, words):\n return self.stemmer.stemWords(words)",
"def stemWords(self, words):\n\t\tif stemmer == \"lancaster\":\n\t\t\tstemmer = LancasterStemmer()\n\t\telif stemmer == \"snowbal\":\n\t\t\tstemmer = SnowballStemmer()\n\t\telif stemmer == \"porter\":\n\t\t\tstemmer = PorterStemmer()\n\t\tstems = [stemmer.stem(word) for word in words]\n\t\treturn stems",
"def stemming(self,sentence):",
"def stem_words(words):\n stemmer = Stemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word,0,len(word)-1)\n stems.append(stem)\n return stems",
"def stem_words(self, words):\n\t\tstemmer = LancasterStemmer()\n\t\tstems = []\n\t\tfor word in words:\n\t\t\tstem = stemmer.stem(word)\n\t\t\tstems.append(stem)\n\t\treturn stems",
"def stemming_filter(words):\n stemmer = snowball.GermanStemmer()\n return [stemmer.stem(w) for w in words]",
"def apply_all(text):\n return stem_words(remove_stop_words(initial_clean(text)))",
"def stem_words(text):\n try:\n text = [stemmer.stem(word) for word in text]\n text = [word for word in text if len(word) > 1] # no single letter words\n except IndexError:\n pass\n\n return text",
"def _stem_words(words):\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems",
"def apply_stemming(document):\n return [BasicNL.stemmer.stem(x) for x in document]",
"def stem_words(words):\r\n stemmer = LancasterStemmer()\r\n stems = []\r\n for word in words:\r\n stem = stemmer.stem(word)\r\n stems.append(stem)\r\n return stems",
"def stem_words(words):\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems",
"def stem_words(words):\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems",
"def stem_words(words):\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems",
"def stem_words(words):\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems",
"def stem_words(words):\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems",
"def tokenizeAndStem(phrase, ignore = []):\n # Final list of stems\n stems = []\n # Loop through tokenized items\n for word in word_tokenize(phrase):\n # Ignore unimporant character\n if word not in ignore:\n # Stem and lowercase each word\n stems.append(stemmer.stem(word.lower()))\n return stems",
"def stem(word):\n return stemmer.stem(word.lower())",
"def stem(word):\n return stemmer.stem(word.lower())",
"def test_unstem():\n\n # cat > cats\n t = Text('cat cat cats')\n assert t.unstem('cat') == 'cat'\n\n # cats > cat\n t = Text('cat cat cats cats cats')\n assert t.unstem('cat') == 'cats'",
"def stemwords(words):\n stemmer = nltk.stem.PorterStemmer()\n result = [stemmer.stem(w) for w in words]\n return result",
"def stem_list(self, listStem):\n for wordInx in range(len(listStem)):\n word = listStem[wordInx]\n if word.upper() != word:\n listStem[wordInx] = self.stemmer.stem(word)\n return listStem",
"def stemWord(self,word):\n if(\"stem\" in self._classes):\n return self._stem.stemmingWord(word)",
"def tweet_stemmer(tweet: str, is_lower: bool = True) -> str:\n tweet = tweet_sanitize(tweet)\n tweet = remove_stopwords(tweet, is_lower)\n return simple_stemmer(tweet)",
"def stem_text(self):\n\n stemmer = None\n\n if self.lang == 'ru':\n stemmer = RussianStemmer()\n\n elif self.lang == 'en':\n stemmer = EnglishStemmer()\n\n for i in range(len(self.__corpora)):\n words = self.__corpora[i].split()\n\n if self.lang == 'uk':\n self.__corpora[i] = ' '.join([UkrainianStemmer(word).stem_word() for word in words])\n\n else:\n self.__corpora[i] = ' '.join([stemmer.stem(word) for word in words])",
"def stem(s):\n short_words = {'is': 'is', 'the': 'the','he': 'he', 'she': 'she', \\\n 'my': 'my', }\n if s in short_words:\n return s\n if s[-1] == 's':\n s = s[:-1]\n special_cases = {'children': 'child', 'doing': 'do', 'did': 'do', \\\n 'string': 'string', 'spring': 'spring'}\n if s in special_cases:\n return special_cases[s]\n if s[-1] == 'e':\n s = s[:-1]\n if s[-3:] == 'ing' and len(s) > 5:\n if s[-5:-3] == 'mm' or s[-5:-3] == 'tt':\n s = s[-4]\n else:\n s = s[:-3]\n if s[-1] == 'y':\n s = s[:-1] + 'i'\n elif s[-2:] == 'er' and len(s) > 4:\n if s[-4:-2] == 'mm' or s[-4:-2] == 'tt':\n s = s[:-3]\n else:\n s = s[:-2]\n elif s[-2:] == 'ed' and len(s) > 4:\n if s[-4:-2] == 'mm' or s[-4:-2] == 'tt':\n s = s[:-3]\n else:\n s = s[:-2]\n return s",
"def stem_words(text: str, stemmer=PorterStemmer()) -> str:\n return ' '.join(stemmer.stem(word) for word in text.split())",
"def test_stemming():\n normalizer = TextNormalizer(stem=True, lemmatize=False)\n assert normalizer.transform([[\"running\"]])[\"corpus\"][0] == [\"run\"]",
"def setStem(self, stem):\n\t\tself.stem=stem;",
"def stemming(list_of_words):\n curated_list = []\n\n for word in list_of_words:\n output = STEMMER.stem(word)\n\n # preserving uppercase words\n if(word.isupper()):\n curated_list.append(output.upper())\n\n if(word.isupper() == False):\n curated_list.append(output)\n # end of loop\n return curated_list"
]
| [
"0.7243619",
"0.7022568",
"0.6754073",
"0.6657073",
"0.66218907",
"0.66076064",
"0.6532568",
"0.6519146",
"0.65048206",
"0.6432638",
"0.63969696",
"0.6363052",
"0.6363052",
"0.6363052",
"0.6363052",
"0.6363052",
"0.6288727",
"0.6284742",
"0.6284742",
"0.6257301",
"0.6246726",
"0.62204945",
"0.6217477",
"0.61500764",
"0.6142415",
"0.6133741",
"0.6131449",
"0.60940903",
"0.6077919",
"0.60622084"
]
| 0.8044658 | 0 |
Create new problem entries in database from yml document stream. Can create a yml file with individual problems delimited into documents using the `` ... `` document stream syntax. | def bulk_create_problem_yml(self, path):
with open(path, "r") as f:
obj_all = yaml.load_all(f)
for obj in obj_all:
self.create_problem(**obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_problem_yml(self, path):\n with open(path, \"r\") as f:\n obj = yaml.load(f)\n\n self.create_problem(**obj)",
"def create_problem_yaml(sbml_files: Union[str, List[str]],\n condition_files: Union[str, List[str]],\n measurement_files: Union[str, List[str]],\n parameter_file: str,\n observable_files: Union[str, List[str]],\n yaml_file: str,\n visualization_files: Optional[Union[str, List[str]]]\n = None) -> None:\n if isinstance(sbml_files, str):\n sbml_files = [sbml_files]\n if isinstance(condition_files, str):\n condition_files = [condition_files]\n if isinstance(measurement_files, str):\n measurement_files = [measurement_files]\n if isinstance(observable_files, str):\n observable_files = [observable_files]\n if isinstance(visualization_files, str):\n visualization_files = [visualization_files]\n\n problem_dic = {CONDITION_FILES: condition_files,\n MEASUREMENT_FILES: measurement_files,\n SBML_FILES: sbml_files,\n OBSERVABLE_FILES: observable_files}\n if visualization_files is not None:\n problem_dic.update({'visualization_files': visualization_files})\n yaml_dic = {PARAMETER_FILE: parameter_file,\n FORMAT_VERSION: 1,\n PROBLEMS: [problem_dic]}\n write_yaml(yaml_dic, yaml_file)",
"def create_yml(self):\n fid = open(os.path.join(RESOURCE_PATH,\n '11079419_SNA_SNA.txt'),\n MODE_ASCII_READ)\n\n stream_handle = fid\n\n self.create_parser(stream_handle, True)\n\n particles = self.parser.get_records(1000)\n\n self.particle_to_yml(particles, '11079419_SNA_SNA_telem.yml')\n fid.close()",
"def create():\n logging.info('\"Create\" task started using config file %s', args.config)\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n # Store a dictionary of id_field values: node IDs so we can add child nodes.\n node_ids = dict()\n\n field_definitions = get_field_definitions(config)\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n node_endpoint = config['host'] + '/node?_format=json'\n\n for row in csv_data:\n row = clean_csv_values(row)\n id_field = row[config['id_field']]\n\n # Add required fields.\n node = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': row['title']}\n ],\n 'status': [\n {'value': config['published']}\n ]\n }\n\n # If a node with an ID that matches the current item's\n # 'parent_id' value has just been created, make the item\n # a child of the node.\n if 'parent_id' in row.keys() and row['parent_id'] in node_ids:\n row['field_member_of'] = node_ids[row['parent_id']]\n\n # Add custom (non-required) CSV fields.\n required_fields = ['file', config['id_field'], 'title']\n custom_fields = list(\n set(csv_column_headers) - set(required_fields))\n for custom_field in custom_fields:\n if not isinstance(row[custom_field], str):\n continue\n # Skip updating field if value is empty.\n if len(row[custom_field]) == 0:\n continue\n\n # This field can exist in the CSV to create parent/child\n # relationships and is not a Drupal field.\n if custom_field == 'parent_id':\n continue\n\n # 'langcode' is a core Drupal field, but is not considered a \"base field\".\n if custom_field == 'langcode':\n continue\n\n # Execute field preprocessor scripts, if any are configured. Note that these scripts\n # are applied to the entire value from the CSV field and not split field values,\n # e.g., if a field is multivalued, the preprocesor must split it and then reassemble\n # it back into a string before returning it. Note that preprocessor scripts work only\n # on string data and not on binary data like images, etc. and only on custom fields\n # (so not title).\n if 'preprocessors' in config and len(config['preprocessors']) > 0:\n for field, command in config['preprocessors'].items():\n if field in csv_column_headers:\n output, return_code = preprocess_field_data(config['subdelimiter'], row[field], command)\n if return_code == 0:\n preprocessor_input = copy.deepcopy(row[field])\n row[field] = output.decode().strip()\n logging.info('Preprocess command %s executed, taking \"%s\" as input and returning \"%s\".', command, preprocessor_input, output.decode().strip())\n else:\n message = 'Preprocess command ' + command + ' failed with return code ' + str(return_code)\n logging.error(message)\n sys.exit(message)\n\n # Assemble Drupal field structures for entity reference fields from CSV data. 
For\n # taxonomy terms, target_type is 'taxonomy_term'; for nodes, it's 'node_type'.\n if field_definitions[custom_field]['field_type'] == 'entity_reference':\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n field_vocabs = get_field_vocabularies(config, field_definitions, custom_field)\n if config['subdelimiter'] in row[custom_field]:\n prepared_tids = []\n delimited_values = row[custom_field].split(config['subdelimiter'])\n for delimited_value in delimited_values:\n tid = prepare_term_id(config, field_vocabs, delimited_value)\n tid = str(tid)\n prepared_tids.append(tid)\n row[custom_field] = config['subdelimiter'].join(prepared_tids)\n else:\n row[custom_field] = prepare_term_id(config, field_vocabs, row[custom_field])\n row[custom_field] = str(row[custom_field])\n\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n node[custom_field] = [\n {'target_id': subvalues[0],\n 'target_type': target_type}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Typed relation fields.\n elif field_definitions[custom_field]['field_type'] == 'typed_relation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, 
row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Geolocation fields.\n elif field_definitions[custom_field]['field_type'] == 'geolocation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # For non-entity reference and non-typed relation fields (text, integer, boolean etc.).\n else:\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n first_subvalue = subvalues[0]\n first_subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], first_subvalue)\n node[custom_field] = [{'value': first_subvalue}]\n if len(subvalues) 
> 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n node_headers = {'Content-Type': 'application/json'}\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('Node for \"' + row['title'] + '\" (record ' + id_field + ') created at ' + node_uri + '.')\n logging.info(\"Node for %s (record %s) created at %s.\", row['title'], id_field, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, id_field, node_response.text)\n else:\n logging.error(\"Node for CSV record %s not created, HTTP response code was %s.\", id_field, node_response.status_code)\n continue\n\n # Map ID from CSV of newly created node to its node ID so we can use it for linking child nodes, etc.\n if node_response.status_code == 201:\n node_nid = node_uri.rsplit('/', 1)[-1]\n node_ids[id_field] = node_nid\n\n # If there is no media file (and we're not creating paged content), move on to the next CSV row.\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+No media for ' + node_uri + ' created since its \"file\" field in the CSV is empty.')\n logging.warning(\"No media for %s created since its 'file' field in the CSV is empty.\", node_uri)\n continue\n\n # If there is a media file, add it.\n if 'file' in row:\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n if node_response.status_code == 201:\n # If what is identified in the 'file' field is a file, create the media from it.\n if 'file' in row and len(row['file']) != 0 and os.path.isfile(file_path):\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+' + media_type.title() + \" media for \" + row['file'] + \" created.\")\n logging.info(\"%s media for %s created.\", media_type.title(), row['file'])\n\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+ No file specified in CSV for ' + row['title'])\n logging.info(\"No file specified for %s, so no media created.\", id_field)\n\n if config['paged_content_from_directories'] is True:\n # Console output and logging are done in the create_children_from_directory function.\n create_children_from_directory(config, row, node_nid, row['title'])",
"def construct_yaml_stream(in_stream):\n\n global _yaml_initialized\n\n logger.info('Request to construct yaml')\n\n if not _yaml_initialized:\n def _object_creator(loader, node, deep=True):\n mapping = {}\n for key_node, value_node in node.value:\n key = loader.construct_object(key_node, deep=deep)\n value = loader.construct_object(value_node, deep=deep)\n mapping[key] = value\n\n if '__factory__' in mapping:\n print('I am here')\n try:\n _cls = mapping['__factory__']\n del mapping['__factory__']\n logger.debug('__factory__ found in yaml, attempting to construct %s', _cls)\n\n # This line is used for referencing modules by a registered alias\n if type(_cls) == str:\n registrar_values = find_type(_cls)\n _cls = registrar_values['factory_method']\n default_args = registrar_values['default_values']\n mapping = {**default_args, **mapping}\n\n return _cls(**mapping)\n except Exception as e:\n logger.error('Failed to construct yaml object %s, %s', e, str(mapping))\n raise e\n\n return loader.construct_mapping(node, deep)\n\n logger.info(f'Registering yaml constructor for python !obj types')\n yaml.add_constructor('!obj', _object_creator, yaml.Loader)\n\n _yaml_initialized = True\n\n return yaml.load(in_stream)",
"def create_yaml_workflow_schema():\n reana_yaml_schema = \\\n '''\n version: 0.4.0\n inputs:\n files:\n - code/helloworld.py\n - inputs/names.txt\n parameters:\n sleeptime: 2\n inputfile: inputs/names.txt\n helloworld: code/helloworld.py\n outputfile: outputs/greetings.txt\n outputs:\n files:\n - outputs/greetings.txt\n workflow:\n type: serial\n specification:\n steps:\n - environment: 'python:2.7'\n commands:\n - python \"${helloworld}\" --sleeptime ${sleeptime} \\\n --inputfile \"${inputfile}\" --outputfile \"${outputfile}\"\n '''\n return reana_yaml_schema",
"def main(\n streams: typing.Iterable[typing.TextIO],\n config_path: str,\n log_path: str,\n pattern_name: str,\n level_key: typing.Optional[str],\n template: typing.Optional[str],\n _multiline_keys_add: typing.Sequence[str],\n _multiline_keys_replace: typing.Sequence[str],\n _priority_keys: typing.Sequence[str],\n _remove_keys: typing.Sequence[str],\n) -> None:\n jsonlog.basicConfig(filename=ensure_log_path(log_path))\n\n remove_keys = Key.from_strings(_remove_keys)\n multiline_keys_add = Key.from_strings(_multiline_keys_add)\n multiline_keys_replace = Key.from_strings(_multiline_keys_replace)\n priority_keys = Key.from_strings(_priority_keys)\n\n streams = streams or (sys.stdin,)\n config: Config = Config.configure(config_path)\n pattern: Pattern = config.patterns[pattern_name]\n\n if level_key:\n pattern = pattern.replace(level_key=Key.from_string(level_key))\n\n if multiline_keys_replace:\n pattern = pattern.replace(multiline_keys=multiline_keys_replace)\n\n if multiline_keys_add:\n multiline_keys = (*pattern.multiline_keys, *multiline_keys_add)\n pattern = pattern.replace(multiline_keys=multiline_keys)\n\n if priority_keys:\n assert isinstance(pattern, KeyValuePattern)\n pattern = pattern.replace(priority_keys=priority_keys)\n\n if remove_keys:\n assert isinstance(pattern, KeyValuePattern)\n pattern = pattern.remove_keys(remove_keys)\n\n if template:\n assert isinstance(pattern, TemplatePattern)\n pattern = pattern.replace(template=template)\n\n for stream in streams:\n pattern.stream(stream)",
"def seed_user_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\") #data is a list\n\n #get data from split line\n id = int(data[0])\n age = int(data[1])\n zip = data[4]\n\n #create a new record and add it to the queue\n new_user = User(user_id=id, age=age, zipcode=zip)\n db.session.add(new_user)\n\n #commit changes\n db.session.commit()",
"def load_problem_definition():\n # TODO this should be expanded to include all definitions\n with open(Path(\"config.yml\")) as file:\n CONFIG = yaml.full_load(file)\n\n problem_definition_config = CONFIG[\"problem_definition\"]\n\n num_days = problem_definition_config[\"num_days\"]\n hotel_index = problem_definition_config[\"hotel_index\"]\n\n problem_definition = {\"num_days\": num_days, \"hotel_index\": hotel_index}\n\n return problem_definition",
"def user_create_yaml(self):\n pass",
"def _open_yaml(stream, original_file=None, substitutions_dict={}):\n try:\n yaml_contents = yaml.load(stream, Loader=yaml_SafeLoader)\n\n return _get_yaml_contents_without_documentation_complete(yaml_contents, substitutions_dict)\n except DocumentationNotComplete as e:\n raise e\n except Exception as e:\n count = 0\n _file = original_file\n if not _file:\n _file = stream\n with open(_file, \"r\") as e_file:\n lines = e_file.readlines()\n for line in lines:\n count = count + 1\n if re.match(r\"^\\s*\\t+\\s*\", line):\n print(\"Exception while handling file: %s\" % _file, file=sys.stderr)\n print(\"TabIndentationError: Line %s contains tabs instead of spaces:\" % (count), file=sys.stderr)\n print(\"%s\\n\\n\" % repr(line.strip(\"\\n\")), file=sys.stderr)\n sys.exit(1)\n\n print(\"Exception while handling file: %s\" % _file, file=sys.stderr)\n raise e",
"def create_streams(streams):\n for stream in streams:\n Stream.objects.create(**stream)",
"def parse(_log, _config, file_path, db_path, atomic_properties, molecular_properties):\n output_dir = os.path.dirname(db_path)\n create_dirs(_log=_log, output_dir=output_dir)\n generate_db(file_path, db_path, atomic_properties, molecular_properties)",
"def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)",
"def create(tempo, template):\n\n futures = []\n error = False\n\n temp = load_yaml(template)\n\n for issue in temp['issues']:\n start = arrow.Arrow.fromdatetime(\n dateutil.parser.parse(issue['start_time']),\n )\n\n data = {\n 'issueKey': issue['issue'],\n 'timeSpentSeconds': parse_short_time(issue['time_spent']),\n 'startDate': start.format('YYYY-MM-DD'),\n 'startTime': start.format('HH:mm:ss'),\n 'description': issue.get(\n 'description',\n 'Working on issue {}'.format(issue['issue']),\n ),\n 'authorAccountId': issue.get('author_account_id', temp['author_account_id']),\n }\n\n # NOTE(awiddersheim): Load in any extra data overriding base\n # giving some flexibility to what can be created.\n data.update(issue.get('extras') or {})\n\n future = tempo.client.post_future(\n '/worklogs',\n json=data,\n )\n\n future.issue = (\n data['authorAccountId'],\n data['issueKey'],\n data['startDate'],\n data['startTime'],\n )\n\n futures.append(future)\n\n for future in tqdm(\n tempo.client.as_completed(futures),\n desc='Adding worklogs',\n total=len(futures),\n ncols=100,\n ):\n try:\n response = future.result()\n response.raise_for_status()\n except Exception as e:\n click.echo(\n 'Could not create {}: {}'.format(\n future.issue,\n str(e),\n ),\n err=True,\n )\n\n if tempo.verbose:\n click.echo(traceback.format_exc(), err=True)\n\n error = True\n\n if error:\n sys.exit(1)",
"def build_db(db_filename,write_configs=False,configfile=None):\n\n conn = sqlite3.connect(db_filename)\n cur=conn.cursor()\n sql=\"\"\"\nCREATE TABLE bugs(\nreproduction_steps text,\nexpected_behavior text, observed_behavior text,\nassigned_to text,\nfixed INTEGER,\ndate_created timestamp,\ndate_fixed timestamp,\nbug_name text);\n\"\"\"\n cur.executescript(sql)\n conn.commit()\n\n conn.close()\n\n if not os.path.isfile(db_filename): \n raise FattyException(\"Unable to create database file\")\n \n configs=configparser.ConfigParser()\n configs.add_section(\"bug_db\")\n\n configs.set(\"bug_db\",\"db_file\",db_filename)\n if write_configs:\n write_config(configs,configfile)",
"def migrate(cls):\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS meetups(\n id serial PRIMARY KEY,\n topic varchar,\n happening_date varchar,\n tags varchar,\n location varchar,\n images varchar,\n body varchar\n )\"\"\")\n database.connection.commit()",
"def create_problem():\n # Admin check\n if not current_user.admin == 1:\n return serve_error('You must be an admin to create problems',\n response_code=401)\n\n try:\n # Convert the JSON to python array of dictionaries\n cases = request.form['cases']\n cases = loads(cases)\n for case in cases:\n if 'input' not in case or 'output' not in case:\n return serve_error(\n 'Sample case(s) were not formed correctly',\n response_code=400)\n\n # Create the problem\n name = request.form['name'][:32]\n shortname = name.lower().replace(' ', '')\n problem = Problem(\n name=name,\n shortname=shortname\n )\n if 'difficulty' in request.form:\n problem.difficulty = request.form['difficulty']\n if 'appeared_in' in request.form:\n problem.appeared = request.form['appeared_in']\n\n # Create the problem data and add it to the database\n problem_data = ProblemData(\n description=request.form['description'],\n input_desc=request.form['input_desc'],\n output_desc=request.form['output_desc']\n )\n if 'time_limit' in request.form:\n problem_data.time_limit = request.form['time_limit']\n\n # Create list of sample cases\n case_num = 1\n sample_cases = list()\n for case in cases:\n sample = SampleCase(\n case_num=case_num,\n input=case['input'],\n output=case['output']\n )\n case_num += 1\n sample_cases.append(sample)\n\n in_file = zipfile.ZipFile(request.files['in_file'])\n out_file = zipfile.ZipFile(request.files['out_file'])\n sol_file = request.files['sol_file']\n\n # If any required values were missing, serve an error\n except KeyError as err:\n return serve_error('Form field not found: ' + err[0],\n response_code=400)\n\n # Commit everything to the database\n pid = problem.commit_to_session()\n problem_data.pid = pid\n problem_data.commit_to_session()\n for case in sample_cases:\n case.pid = pid\n case.commit_to_session()\n\n # Store the judge data\n directory = os.path.join(app.config['DATA_FOLDER'],\n 'problems', str(problem.pid))\n in_file.extractall(directory)\n out_file.extractall(directory)\n os.mkdir(os.path.join(directory, 'test'))\n sol_file.save(os.path.join(directory, 'test', sol_file.filename))\n\n return serve_response({\n 'name': problem.name,\n 'shortname': problem.shortname,\n 'description': problem_data.description,\n 'input_desc': problem_data.input_desc,\n 'output_desc': problem_data.output_desc,\n 'sample_cases': cases,\n 'pid': problem.pid,\n 'difficulty': problem.difficulty\n })",
"def create_from_yaml(\n yaml_content, user_id, title, category, exploration_id=None,\n image_id=None):\n exploration_dict = utils.dict_from_yaml(yaml_content)\n\n exploration_schema_version = exploration_dict.get('schema_version')\n\n if exploration_schema_version != CURRENT_EXPLORATION_SCHEMA_VERSION:\n raise Exception('Sorry, we can only process v1 YAML files at present.')\n\n init_state_name = exploration_dict['states'][0]['name']\n\n exploration_id = create_new(\n user_id, title, category, exploration_id=exploration_id,\n init_state_name=init_state_name, image_id=image_id)\n\n try:\n exploration_param_specs = {\n ps_name: param_domain.ParamSpec.from_dict(ps_val)\n for (ps_name, ps_val) in exploration_dict['param_specs'].iteritems()\n }\n\n for sdict in exploration_dict['states']:\n if sdict['name'] != init_state_name:\n add_state(user_id, exploration_id, sdict['name'])\n\n for sdict in exploration_dict['states']:\n state = get_state_by_name(exploration_id, sdict['name'])\n\n state.content = [\n exp_domain.Content(item['type'], item['value'])\n for item in sdict['content']\n ]\n\n state.param_changes = [param_domain.ParamChange(\n pc['name'], pc['generator_id'], pc['customization_args']\n ) for pc in sdict['param_changes']]\n\n for pc in state.param_changes:\n if pc.name not in exploration_param_specs:\n raise Exception('Parameter %s was used in a state but not '\n 'declared in the exploration param_specs.'\n % pc.name)\n\n wdict = sdict['widget']\n widget_handlers = [exp_domain.AnswerHandlerInstance.from_dict({\n 'name': handler['name'],\n 'rule_specs': [{\n 'definition': rule_spec['definition'],\n 'dest': convert_state_name_to_id(\n exploration_id, rule_spec['dest']),\n 'feedback': rule_spec['feedback'],\n 'param_changes': rule_spec.get('param_changes', []),\n } for rule_spec in handler['rule_specs']],\n }) for handler in wdict['handlers']]\n\n state.widget = exp_domain.WidgetInstance(\n wdict['widget_id'], wdict['customization_args'],\n widget_handlers, wdict['sticky'])\n\n save_state(user_id, exploration_id, state)\n\n exploration = get_exploration_by_id(exploration_id)\n exploration.default_skin = exploration_dict['default_skin']\n exploration.param_changes = [param_domain.ParamChange(\n pc['name'], pc['generator_id'], pc['customization_args']\n ) for pc in exploration_dict['param_changes']]\n exploration.param_specs = exploration_param_specs\n save_exploration(user_id, exploration)\n except Exception:\n delete_exploration(user_id, exploration_id, force_deletion=True)\n raise\n\n return exploration_id",
"def csv2yaml(source_csv,source_yaml):\n csv_data = pd.read_csv(source_csv)\n csv_data = csv_data[['site','utm_x','utm_y']]\n \n dict_file = {}\n for r in csv_data.iterrows():\n dict_file[r[1].site] = [float(r[1].utm_x),float(r[1].utm_y)]\n dict_file = {'sources':dict_file}\n with open(source_yaml, 'w') as file:\n documents = yaml.dump(dict_file, file)",
"def seed_movie_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\")\n\n #get data from split line\n id = int(data[0])\n release = data[2]\n url = data[4]\n\n #titles might have accented characters so test for this and decode\n #them if so\n title = data[1]\n try:\n title = unicode(title)\n except UnicodeError:\n title = title.decode(\"utf-8\")\n\n #if there's a date there, parse it\n if release:\n release = datetime.strptime(data[2], \"%d-%b-%Y\")\n #otherwise, set release to None so it will become NULL in the database\n else:\n release = None\n\n #create a new record and add it to the queue\n new_movie = Movie(movie_id=id, title=title, \n released_at=release, imdb_url=url)\n db.session.add(new_movie)\n\n #commit changes\n db.session.commit()",
"def test_012_yaml_load(self):\n HEADING()\n db = self.db\n db.connect()\n\n # Clear all jobs currently in the database to ensure a correct final assertion\n db.clear()\n\n # Add the jobs outlined in the YAML file\n db.add_from_yaml(\"etc/jobs.yaml\")\n\n count_fgrep = len(Shell.fgrep(\"input:\", \"etc/jobs.yaml\").split(\"\\n\"))\n\n # Assert that the correct number jobs have been added\n assert(db.count() == count_fgrep)",
"def build_sample_db():\n\n import string\n import random\n\n db.drop_all()\n db.create_all()\n\n with app.app_context():\n posts = [['Алгебра', ['№135', '№345', '№222']],\n ['Геометрия', ['№335', '№545', '№112']],\n ['Русский язык', ['Упражнение 200', 'Упражнение 195']],\n ['Литература', ['Доклад', 'Сочинение по произведению Сенька', 'стр.52 №1,2,3']]]\n first_names = [\n 'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie', 'Sophie', 'Mia',\n 'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',\n 'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'\n ]\n\n for i in range(len(first_names)):\n tmp_email = first_names[i].lower() + \".\" + first_names[i].lower() + \"@example.com\"\n tmp_pass = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(10))\n new_user = User(\n username=first_names[i],\n email=tmp_email\n )\n new_user.set_password(tmp_pass)\n db.session.add(new_user)\n\n for i in posts:\n for q in i[1]:\n a = random.randint(18, 31)\n new_HW = Homeworks(\n type='homework',\n number=q,\n lesson=i[0],\n comments='Сделать до {} мая'.format(a),\n )\n db.session.add(new_HW)\n\n db.session.commit()\n return",
"def add_ongoing_rule():\n rules = request.json['rules']\n now = datetime.datetime.now()\n\n for rule in rules:\n rule['line_id'] = int(rule['line_id'])\n rule['line_name'] = rule['line_name']\n rule['time'] = convert_to_datetime(rule['time'])\n rule['intervals'] = int(rule['intervals'])\n rule['time_wait'] = int(rule['time_wait'])\n rule['repeat_value'] = int(rule['repeat_value'])\n rule['date_start'] = convert_to_datetime(rule['date_start'])\n rule['time_start'] = convert_to_datetime(rule['time_start'])\n rule['date_time_start'] = datetime.datetime.combine(\n rule['date_start'], rule['time_start'].time())\n rule['end_date'] = convert_to_datetime(rule['end_date'])\n rule['active'] = 1\n rule['rule_id'] = str(uuid.uuid4())\n rule['days'] = -1\n\n if rule['date_start'].date() == rule['end_date'].date():\n date_delta = rule['end_date'].date() - now.date()\n if date_delta.days == 0:\n rule['days'] = 0\n if date_delta.days == 1:\n rule['days'] = 1\n\n # \"INSERT INTO life(line_id, time, intervals, time_wait, repeat_value, date_start, \"\n # \"time_start, end_date, active, rule_id) \"\n # \"VALUES ({0}, '{1}', {2}, '{3}', {4}, {5}, '{6}', {7}, {8}, {9}\")\n # insert into ongoing table\n database.update(database.QUERY[mn()].format(\n rule['line_id'], rule['time'], rule['intervals'], rule['time_wait'],\n rule['repeat_value'], rule['date_time_start'],\n rule['end_date'], rule['active'], rule['rule_id']))\n\n # update rules;\n update_rules_from_ongoing_rules(rule)\n logging.info(\"Ongoing rule added. {0}\".format(str(rule)))\n\n template = render_template('ongoing_rule_single.html', n=rule)\n send_ongoing_rule_message(\n 'add_ongoing_rule',\n {'template': template, 'rule_id': rule['rule_id'], 'days': rule['days']})\n\n update_all_rules()\n try:\n response_status = garden_controller.branch_status()\n\n arr = form_responce_for_branches(response_status)\n send_branch_status_message('branch_status', arr)\n except Exception as e:\n logging.error(e)\n logging.error(\"Can't send updated rules. Exception occured\")\n\n return json.dumps({'status': 'OK'})",
"def test_yaml_creation():\n ligand_path = examples_paths()['p-xylene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n molecules = \"\"\"\n T4lysozyme:\n filepath: {}\n leap: {{parameters: oldff/leaprc.ff14SB}}\"\"\".format(examples_paths()['lysozyme'])\n solvent = \"\"\"\n vacuum:\n nonbonded_method: NoCutoff\"\"\"\n protocol = indent(standard_protocol)\n system = \"\"\"\n system:\n ligand: p-xylene\n receptor: T4lysozyme\n solvent: vacuum\"\"\"\n experiment = \"\"\"\n protocol: absolute-binding\n system: system\"\"\"\n\n yaml_content = \"\"\"\n ---\n options:\n output_dir: {}\n molecules:{}\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n benzene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:{}\n GBSA-OBC2:\n nonbonded_method: NoCutoff\n implicit_solvent: OBC2\n systems:{}\n protocols:{}\n experiments:{}\n \"\"\".format(os.path.relpath(tmp_dir), molecules,\n os.path.relpath(ligand_path), toluene_path,\n solvent, system, protocol, experiment)\n\n # We need to check whether the relative paths to the output directory and\n # for p-xylene are handled correctly while absolute paths (T4lysozyme) are\n # left untouched\n expected_yaml_content = textwrap.dedent(\"\"\"\n ---\n version: '{}'\n options:\n experiments_dir: .\n output_dir: .\n molecules:{}\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:{}\n systems:{}\n protocols:{}\n experiments:{}\n \"\"\".format(HIGHEST_VERSION, molecules, os.path.relpath(ligand_path, tmp_dir),\n solvent, system, protocol, experiment))\n expected_yaml_content = expected_yaml_content[1:] # remove first '\\n'\n\n exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))\n\n # during setup we can modify molecule's fields, so we need\n # to check that it doesn't affect the YAML file exported\n experiment_dict = yaml.load(experiment, Loader=yaml.FullLoader)\n exp_builder._db.get_system(experiment_dict['system'])\n\n generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')\n exp_builder._generate_yaml(experiment_dict, generated_yaml_path)\n with open(generated_yaml_path, 'r') as f:\n assert yaml.load(f, Loader=yaml.FullLoader) == yank_load(expected_yaml_content)",
"def test_PhonopyYaml_read_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = _get_unitcell(fp)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)",
"def createPartitions(config, logger):\n databaseConnection, databaseCursor = connectToDatabase(config, logger)\n try:\n for aDatabaseObjectClass in databaseObjectClassListForWeeklyPartitions:\n weekIterator = mondayPairsIteratorFactory(config.startDate, config.endDate)\n aDatabaseObject = aDatabaseObjectClass(logger=logger)\n aDatabaseObject.createPartitions(databaseCursor, weekIterator)\n databaseConnection.commit()\n except:\n databaseConnection.rollback()\n socorro_util.reportExceptionAndAbort(logger)",
"def seed():\n\n try:\n body_parts = app.config.get(\"BODY_PARTS\")\n body_parts_model = get_class_by_tablename(\"body_parts\")\n\n current_records = body_parts_model.query.all()\n\n if current_records:\n for key, value in body_parts.items():\n if not body_parts_model.find(key):\n\n body_parts_model.create(id=key, name=value)\n\n else:\n\n for key, value in body_parts.items():\n body_parts_model.create(id=key, name=value)\n\n try:\n body_parts_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except:\n body_parts_model.session.rollback()\n print('Body parts records already exist in database.')\n\n try:\n body_subparts = app.config.get(\"BODY_SUBPARTS\")\n body_subparts_model = get_class_by_tablename(\"subparts\")\n\n current_records = body_subparts_model.query.all()\n\n if current_records:\n for key, value in body_subparts.items():\n if not body_subparts_model.find(key):\n body_subparts_model.create(id=key,\n name=value['name'],\n coordinates=value['coordinates'],\n active=value['active'],\n body_part_id=value['body_part_id']\n )\n\n else:\n for key, value in body_subparts.items():\n\n body_subparts_model.create(id=key,\n name=value['name'],\n coordinates=value['coordinates'],\n active=value['active'],\n body_part_id=value['body_part_id']\n )\n try:\n body_subparts_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n body_subparts_model.session.rollback()\n print(e)\n print('Subparts records already exist in database.')\n\n\n try:\n symptoms = app.config.get(\"SYMPTOMS\")\n symptoms_model = get_class_by_tablename(\"symptoms\")\n\n current_records = symptoms_model.query.all()\n\n if current_records:\n for key, value in symptoms.items():\n if not symptoms_model.find(key):\n symptoms_model.create(id=key,\n name=value['name'],\n active=value['active']\n )\n\n else:\n for key, value in symptoms.items():\n symptoms_model.create(id=key,\n name=value['name'],\n active=value['active']\n )\n try:\n symptoms_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n symptoms_model.session.rollback()\n print(e)\n print('Symptom records already exist in database.')\n\n\n try:\n suggestions = app.config.get(\"SUGGESTIONS\")\n suggestions_model = get_class_by_tablename(\"suggestions\")\n\n current_records = suggestions_model.query.all()\n\n if current_records:\n for key, value in suggestions.items():\n if not suggestions_model.find(key):\n suggestions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description'],\n link=value['link'],\n video_start=value['video_start'],\n video_end=value['video_end']\n )\n\n else:\n for key, value in suggestions.items():\n suggestions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description'],\n link=value['link'],\n video_start=value['video_start'],\n video_end=value['video_end']\n )\n try:\n suggestions_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n suggestions_model.session.rollback()\n print(e)\n print('Suggestion records already exist in database.')\n\n try:\n conditions = app.config.get(\"CONDITIONS\")\n conditions_model = get_class_by_tablename(\"conditions\")\n symptoms_model = get_class_by_tablename(\"symptoms\")\n suggestions_model = get_class_by_tablename(\"symptoms\")\n symptom_relation = 
app.config.get(\"CONDITION_SYMPTOM_RELATION\")\n suggestion_relation = app.config.get(\"CONDITION_SUGGESTION_RELATION\")\n\n current_records = conditions_model.query.all()\n\n if current_records:\n for key, value in conditions.items():\n if not conditions_model.find(key):\n conditions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description']\n )\n\n else:\n for key, value in conditions.items():\n conditions_model.create(id=key,\n name=value['name'],\n active=value['active'],\n description=value['description']\n )\n\n for key, value in symptom_relation.items():\n condition = conditions_model.query.filter(conditions_model.id == value['condition_id']).first()\n symptom = symptoms_model.query.filter(symptoms_model.id == value['symptom_id']).first()\n condition.symptoms.append(symptom)\n\n # for key, value in suggestion_relation.items():\n # condition = conditions_model.query.filter(conditions_model.id == value['condition_id']).first()\n # suggestion = suggestions_model.query.filter(suggestions_model.id == value['suggestion_id']).first()\n # condition.suggestions.append(suggestion)\n\n try:\n conditions_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n conditions_model.session.rollback()\n print(e)\n print('Condition records already exist in database.')\n\n try:\n body_subparts_model = get_class_by_tablename(\"subparts\")\n relationship = app.config.get(\"SUBPARTS_CONDITION_RELATION\")\n conditions_model = get_class_by_tablename(\"conditions\")\n\n for key, value in relationship.items():\n subpart = body_subparts_model.query.filter(body_subparts_model.id == value['subpart_id']).first()\n condition = conditions_model.query.filter(conditions_model.id == value['condition_id']).first()\n subpart.conditions.append(condition)\n try:\n body_subparts_model.session.commit()\n except IntegrityError as err:\n print(\"Error seeding the database: \", err)\n\n except Exception as e:\n body_subparts_model.session.rollback()\n print(e)\n print('Subparts Relation records already exist in database.')",
"def create_issues(repo, title, body, verbose=None):\n label = get_label(repo, title)\n if not label:\n err = \"A label embedded in parentheses is currently required. For \" \\\n \"example 'Title of Error (title_tag).' You provided: {0}\"\n raise NotImplementedError(err.format(title))\n # get stdout written to file\n with open(body) as fi:\n issues = fi.readlines()\n fi.close()\n # Handle empty body\n if not issues:\n raise RuntimeWarning(\"The body text is empty and no issue will be \"\n \"created for file: {}.\".format(body))\n # Handle multiline error messages.\n if 'Traceback' in ''.join(issues):\n if verbose:\n print \"Issue is a Traceback...\"\n string = \"\".join(issues)\n sha = hashlib.sha1(string).hexdigest()[0:6]\n error = dict(experiment_site_id=\"Traceback:{}\".format(sha),\n error=\"Traceback\",\n message=string)\n issues = [json.dumps(error, sort_keys=True)]\n for issue in issues:\n # Check for new format\n try:\n issue_dict = json.loads(issue)\n issue_dict.update({'title': get_valid_title(title)})\n error_msg = issue_dict.get('error')\n experiment_site_id = issue_dict.get('experiment_site_id')\n subject = \"{}, {}\".format(experiment_site_id, error_msg)\n body = generate_body(issue_dict)\n except:\n if verbose:\n print(\"Falling back to old issue formatting.\")\n # Old error handling approach.\n # Create a unique id.\n sha1 = hashlib.sha1(issue).hexdigest()[0:6]\n subject_base = title[0:title.index(' (')]\n subject = subject_base + \": {0}\".format(sha1)\n body = issue\n if is_open_issue(repo, subject, verbose=verbose):\n pass\n else:\n try:\n github_issue = repo.create_issue(subject, body=body, labels=label)\n except Exception as e:\n print 'Failed to create_issue with title:{0}, body:{1} and label:{2}, \\\n exception: {3}'.format(subject, body, label, str(e))\n if verbose:\n print \"Created issue... See: {0}\".format(github_issue.url)\n return None",
"def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):\n\n logger.warning('Currently only works with Si and a special setup')\n if radial1d_mdl.atom_data.synpp_refs is not None:\n raise ValueError(\n 'The current atom dataset does not contain the '\n 'necessary reference files (please contact the authors)')\n\n radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0\n for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():\n try:\n radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(\n radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])\n except KeyError:\n pass\n\n\n relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[\n radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]\n\n with open(synpp_default_yaml_fname) as stream:\n yaml_reference = yaml.load(stream, Loader=yaml.CLoader)\n\n if lines_db is not None:\n yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')\n yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')\n\n yaml_reference['output']['min_wl'] = float(\n radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())\n yaml_reference['output']['max_wl'] = float(\n radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())\n\n\n #raise Exception(\"there's a problem here with units what units does synpp expect?\")\n yaml_reference['opacity']['v_ref'] = float(\n (radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /\n (1000. * u.km / u.s)).value)\n yaml_reference['grid']['v_outer_max'] = float(\n (radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /\n (1000. * u.km / u.s)).value)\n\n #pdb.set_trace()\n\n yaml_setup = yaml_reference['setups'][0]\n yaml_setup['ions'] = []\n yaml_setup['log_tau'] = []\n yaml_setup['active'] = []\n yaml_setup['temp'] = []\n yaml_setup['v_min'] = []\n yaml_setup['v_max'] = []\n yaml_setup['aux'] = []\n\n for species, synpp_ref in relevant_synpp_refs.iterrows():\n yaml_setup['ions'].append(100 * species[0] + species[1])\n yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))\n yaml_setup['active'].append(True)\n yaml_setup['temp'].append(yaml_setup['t_phot'])\n yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])\n yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])\n yaml_setup['aux'].append(1e200)\n\n with open(fname, 'w') as f:\n yaml.dump(yaml_reference, stream=f, explicit_start=True)"
]
| [
"0.61997813",
"0.5509375",
"0.5250607",
"0.5211746",
"0.51627785",
"0.5148468",
"0.50572896",
"0.5039478",
"0.5036629",
"0.5018428",
"0.4979809",
"0.49567744",
"0.49291268",
"0.4919583",
"0.48977384",
"0.4887273",
"0.4854094",
"0.48412806",
"0.4833363",
"0.48256907",
"0.48174068",
"0.4790782",
"0.47863084",
"0.4783988",
"0.47776598",
"0.4772294",
"0.47585994",
"0.4729957",
"0.47174925",
"0.47142088"
]
| 0.65926623 | 0 |
Create a new problem entry in the database from a YAML file. | def create_problem_yml(self, path):
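    # Parse the YAML problem definition and pass its top-level keys as keyword arguments.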
    with open(path, "r") as f:
        obj = yaml.load(f, Loader=yaml.SafeLoader)
    self.create_problem(**obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bulk_create_problem_yml(self, path):\n with open(path, \"r\") as f:\n obj_all = yaml.load_all(f)\n for obj in obj_all:\n self.create_problem(**obj)",
"def create_problem():\n # Admin check\n if not current_user.admin == 1:\n return serve_error('You must be an admin to create problems',\n response_code=401)\n\n try:\n # Convert the JSON to python array of dictionaries\n cases = request.form['cases']\n cases = loads(cases)\n for case in cases:\n if 'input' not in case or 'output' not in case:\n return serve_error(\n 'Sample case(s) were not formed correctly',\n response_code=400)\n\n # Create the problem\n name = request.form['name'][:32]\n shortname = name.lower().replace(' ', '')\n problem = Problem(\n name=name,\n shortname=shortname\n )\n if 'difficulty' in request.form:\n problem.difficulty = request.form['difficulty']\n if 'appeared_in' in request.form:\n problem.appeared = request.form['appeared_in']\n\n # Create the problem data and add it to the database\n problem_data = ProblemData(\n description=request.form['description'],\n input_desc=request.form['input_desc'],\n output_desc=request.form['output_desc']\n )\n if 'time_limit' in request.form:\n problem_data.time_limit = request.form['time_limit']\n\n # Create list of sample cases\n case_num = 1\n sample_cases = list()\n for case in cases:\n sample = SampleCase(\n case_num=case_num,\n input=case['input'],\n output=case['output']\n )\n case_num += 1\n sample_cases.append(sample)\n\n in_file = zipfile.ZipFile(request.files['in_file'])\n out_file = zipfile.ZipFile(request.files['out_file'])\n sol_file = request.files['sol_file']\n\n # If any required values were missing, serve an error\n except KeyError as err:\n return serve_error('Form field not found: ' + err[0],\n response_code=400)\n\n # Commit everything to the database\n pid = problem.commit_to_session()\n problem_data.pid = pid\n problem_data.commit_to_session()\n for case in sample_cases:\n case.pid = pid\n case.commit_to_session()\n\n # Store the judge data\n directory = os.path.join(app.config['DATA_FOLDER'],\n 'problems', str(problem.pid))\n in_file.extractall(directory)\n out_file.extractall(directory)\n os.mkdir(os.path.join(directory, 'test'))\n sol_file.save(os.path.join(directory, 'test', sol_file.filename))\n\n return serve_response({\n 'name': problem.name,\n 'shortname': problem.shortname,\n 'description': problem_data.description,\n 'input_desc': problem_data.input_desc,\n 'output_desc': problem_data.output_desc,\n 'sample_cases': cases,\n 'pid': problem.pid,\n 'difficulty': problem.difficulty\n })",
"def db_insert(name, task, time, note):\n Entry.create(name=name,\n task=task,\n time=time,\n note=note)\n return main()",
"def load_problem_definition():\n # TODO this should be expanded to include all definitions\n with open(Path(\"config.yml\")) as file:\n CONFIG = yaml.full_load(file)\n\n problem_definition_config = CONFIG[\"problem_definition\"]\n\n num_days = problem_definition_config[\"num_days\"]\n hotel_index = problem_definition_config[\"hotel_index\"]\n\n problem_definition = {\"num_days\": num_days, \"hotel_index\": hotel_index}\n\n return problem_definition",
"def build_db(db_filename,write_configs=False,configfile=None):\n\n conn = sqlite3.connect(db_filename)\n cur=conn.cursor()\n sql=\"\"\"\nCREATE TABLE bugs(\nreproduction_steps text,\nexpected_behavior text, observed_behavior text,\nassigned_to text,\nfixed INTEGER,\ndate_created timestamp,\ndate_fixed timestamp,\nbug_name text);\n\"\"\"\n cur.executescript(sql)\n conn.commit()\n\n conn.close()\n\n if not os.path.isfile(db_filename): \n raise FattyException(\"Unable to create database file\")\n \n configs=configparser.ConfigParser()\n configs.add_section(\"bug_db\")\n\n configs.set(\"bug_db\",\"db_file\",db_filename)\n if write_configs:\n write_config(configs,configfile)",
"def new_entry(path, name):\n\n default_config = {'prompt': \"Select command to run:\", 'choices': {}}\n with open(path, 'w') as f:\n json.dump(default_config, f)\n\n add_entry_to_database(path, name)",
"def create_problem(self, name=\"\", problem_type=\"\", problem_type_details={},\n data_dir_train=\"\", data_dir_test=\"\", files=[], table_names=[],\n entities_table_name=\"\", entities_featurized_table_name=\"\",\n target_table_name=\"\"):\n\n with self.__orm.session_scope() as session:\n try:\n problem = session.query(Problem).filter(Problem.name == name).one()\n print(\"Problem {} already exists\".format(name))\n return\n except NoResultFound:\n pass # we will create it\n\n problem = Problem(\n name = name,\n problem_type = problem_type,\n problem_type_details = json.dumps(problem_type_details),\n data_dir_train = data_dir_train,\n data_dir_test = data_dir_test,\n files = json.dumps(files),\n table_names = json.dumps(table_names),\n entities_table_name = entities_table_name,\n entities_featurized_table_name = entities_featurized_table_name,\n target_table_name = target_table_name,\n )\n session.add(problem)\n print(\"Problem {} successfully created\".format(name))",
"def create():\n logging.info('\"Create\" task started using config file %s', args.config)\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n # Store a dictionary of id_field values: node IDs so we can add child nodes.\n node_ids = dict()\n\n field_definitions = get_field_definitions(config)\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n node_endpoint = config['host'] + '/node?_format=json'\n\n for row in csv_data:\n row = clean_csv_values(row)\n id_field = row[config['id_field']]\n\n # Add required fields.\n node = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': row['title']}\n ],\n 'status': [\n {'value': config['published']}\n ]\n }\n\n # If a node with an ID that matches the current item's\n # 'parent_id' value has just been created, make the item\n # a child of the node.\n if 'parent_id' in row.keys() and row['parent_id'] in node_ids:\n row['field_member_of'] = node_ids[row['parent_id']]\n\n # Add custom (non-required) CSV fields.\n required_fields = ['file', config['id_field'], 'title']\n custom_fields = list(\n set(csv_column_headers) - set(required_fields))\n for custom_field in custom_fields:\n if not isinstance(row[custom_field], str):\n continue\n # Skip updating field if value is empty.\n if len(row[custom_field]) == 0:\n continue\n\n # This field can exist in the CSV to create parent/child\n # relationships and is not a Drupal field.\n if custom_field == 'parent_id':\n continue\n\n # 'langcode' is a core Drupal field, but is not considered a \"base field\".\n if custom_field == 'langcode':\n continue\n\n # Execute field preprocessor scripts, if any are configured. Note that these scripts\n # are applied to the entire value from the CSV field and not split field values,\n # e.g., if a field is multivalued, the preprocesor must split it and then reassemble\n # it back into a string before returning it. Note that preprocessor scripts work only\n # on string data and not on binary data like images, etc. and only on custom fields\n # (so not title).\n if 'preprocessors' in config and len(config['preprocessors']) > 0:\n for field, command in config['preprocessors'].items():\n if field in csv_column_headers:\n output, return_code = preprocess_field_data(config['subdelimiter'], row[field], command)\n if return_code == 0:\n preprocessor_input = copy.deepcopy(row[field])\n row[field] = output.decode().strip()\n logging.info('Preprocess command %s executed, taking \"%s\" as input and returning \"%s\".', command, preprocessor_input, output.decode().strip())\n else:\n message = 'Preprocess command ' + command + ' failed with return code ' + str(return_code)\n logging.error(message)\n sys.exit(message)\n\n # Assemble Drupal field structures for entity reference fields from CSV data. 
For\n # taxonomy terms, target_type is 'taxonomy_term'; for nodes, it's 'node_type'.\n if field_definitions[custom_field]['field_type'] == 'entity_reference':\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n field_vocabs = get_field_vocabularies(config, field_definitions, custom_field)\n if config['subdelimiter'] in row[custom_field]:\n prepared_tids = []\n delimited_values = row[custom_field].split(config['subdelimiter'])\n for delimited_value in delimited_values:\n tid = prepare_term_id(config, field_vocabs, delimited_value)\n tid = str(tid)\n prepared_tids.append(tid)\n row[custom_field] = config['subdelimiter'].join(prepared_tids)\n else:\n row[custom_field] = prepare_term_id(config, field_vocabs, row[custom_field])\n row[custom_field] = str(row[custom_field])\n\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n node[custom_field] = [\n {'target_id': subvalues[0],\n 'target_type': target_type}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Typed relation fields.\n elif field_definitions[custom_field]['field_type'] == 'typed_relation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, 
row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Geolocation fields.\n elif field_definitions[custom_field]['field_type'] == 'geolocation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # For non-entity reference and non-typed relation fields (text, integer, boolean etc.).\n else:\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n first_subvalue = subvalues[0]\n first_subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], first_subvalue)\n node[custom_field] = [{'value': first_subvalue}]\n if len(subvalues) 
> 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n node_headers = {'Content-Type': 'application/json'}\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('Node for \"' + row['title'] + '\" (record ' + id_field + ') created at ' + node_uri + '.')\n logging.info(\"Node for %s (record %s) created at %s.\", row['title'], id_field, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, id_field, node_response.text)\n else:\n logging.error(\"Node for CSV record %s not created, HTTP response code was %s.\", id_field, node_response.status_code)\n continue\n\n # Map ID from CSV of newly created node to its node ID so we can use it for linking child nodes, etc.\n if node_response.status_code == 201:\n node_nid = node_uri.rsplit('/', 1)[-1]\n node_ids[id_field] = node_nid\n\n # If there is no media file (and we're not creating paged content), move on to the next CSV row.\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+No media for ' + node_uri + ' created since its \"file\" field in the CSV is empty.')\n logging.warning(\"No media for %s created since its 'file' field in the CSV is empty.\", node_uri)\n continue\n\n # If there is a media file, add it.\n if 'file' in row:\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n if node_response.status_code == 201:\n # If what is identified in the 'file' field is a file, create the media from it.\n if 'file' in row and len(row['file']) != 0 and os.path.isfile(file_path):\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+' + media_type.title() + \" media for \" + row['file'] + \" created.\")\n logging.info(\"%s media for %s created.\", media_type.title(), row['file'])\n\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+ No file specified in CSV for ' + row['title'])\n logging.info(\"No file specified for %s, so no media created.\", id_field)\n\n if config['paged_content_from_directories'] is True:\n # Console output and logging are done in the create_children_from_directory function.\n create_children_from_directory(config, row, node_nid, row['title'])",
"def add_exercise(self):\r\n\r\n # Take the exercise entires from TOML file\r\n entries = self.cfg.get(\"payload\",{}).get(\"exercise\")\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/exercise.json', test=payload)\r\n # Post request\r\n requests.post(API.url_exercise, data = payload, headers = self.headers, timeout = 2)",
"def create_task(text):\n new_task = Tasks(task_text=text) \n new_task.save()",
"def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")",
"def problem_create(self, problem): \n return self._post(\"problems\", json=problem).json()",
"def Create(data):\n try:\n bug = Bug()\n bug.Patch(data)\n bug.put()\n except (TypeError, db.Error, AssertionError), e:\n logging.error('bug.Create: Exception while creating bug: %s', e)\n raise CreateError('Failed to create a new bug.\\n%s\\n' % e)\n return bug",
"def _create_db_entries(self, qa):\n\n def _package_description(raw):\n return raw[2:].replace(' - ', ' - ')\n\n log.debug('Creating database entries')\n\n\n # Parse component and section from field in changes\n component, section = parse_section(self.changes['files'][0]['section'])\n\n # Check whether package is already in the database\n package_query = meta.session.query(Package).filter_by(name=self.changes['Source'])\n if package_query.count() == 1:\n log.debug('Package %s already exists in the database' % self.changes['Source'])\n package = package_query.one()\n # Update description to make sure it reflects the latest upload\n package.description = _package_description(self.changes['Description'])\n else:\n log.debug('Package %s is new to the system' % self.changes['Source'])\n package = Package(name=self.changes['Source'], user=self.user)\n package.description = _package_description(self.changes['Description'])\n package.needs_sponsor = 0\n meta.session.add(package)\n\n # No need to check whether there is the same source name and same version as an existing\n # entry in the database as the upload controller tested whether similar filenames existed\n # in the repository. The only way this would be wrong is if the filename had a different\n # version in than the Version field in changes..\n\n\n try:\n closes = self.changes['Closes']\n except KeyError:\n closes = None\n\n # TODO: fix these magic numbers\n if qa.stop():\n qa_status = 1\n else:\n qa_status = 0\n\n maintainer_matches = re.compile(r'(.*) <(.*)>').match(self.changes['Changed-By'])\n maintainer = maintainer_matches.group(2)\n\n package_version = PackageVersion(package=package, version=self.changes['Version'],\n section=section, distribution=self.changes['Distribution'], qa_status=qa_status,\n component=component, priority=self.changes.get_priority(), closes=closes,\n uploaded=datetime.now(), maintainer=maintainer)\n meta.session.add(package_version)\n\n source_package = SourcePackage(package_version=package_version)\n meta.session.add(source_package)\n\n binary_package = None\n\n # Add PackageFile objects to the database for each uploaded file\n for file in self.files:\n filename = os.path.join(self.changes.get_pool_path(), file)\n # This exception should be never caught.\n # It implies something went wrong before, as we expect a file which does not exist\n try:\n sum = md5sum(os.path.join(pylons.config['debexpo.repository'], filename))\n except AttributeError as e:\n self._fail(\"Could not calculate MD5 sum: %s\" % (e))\n\n size = os.stat(os.path.join(pylons.config['debexpo.repository'], filename))[ST_SIZE]\n\n # Check for binary or source package file\n if file.endswith('.deb'):\n # Only create a BinaryPackage if there actually binary package files\n if binary_package is None:\n binary_package = BinaryPackage(package_version=package_version, arch=file[:-4].split('_')[-1])\n meta.session.add(binary_package)\n\n meta.session.add(PackageFile(filename=filename, binary_package=binary_package, size=size, md5sum=sum))\n else:\n meta.session.add(PackageFile(filename=filename, source_package=source_package, size=size, md5sum=sum))\n\n meta.session.commit()\n log.warning(\"Finished adding PackageFile objects.\")\n\n # Add PackageInfo objects to the database for the package_version\n for result in qa.result:\n meta.session.add(PackageInfo(package_version=package_version, from_plugin=result.from_plugin,\n outcome=result.outcome, rich_data=result.data, severity=result.severity))\n\n # Commit all changes to the database\n 
meta.session.commit()\n log.debug('Committed package data to the database')\n\n subscribers = meta.session.query(PackageSubscription).filter_by(package=self.changes['Source']).filter(\\\n PackageSubscription.level <= constants.SUBSCRIPTION_LEVEL_UPLOADS).all()\n\n if len(subscribers) > 0:\n email = Email('package_uploaded')\n self.send_email(email, [s.user.email for s in subscribers], package=self.changes['Source'],\n version=self.changes['Version'], user=self.user)\n\n log.debug('Sent out package subscription emails')\n\n # Send success email to uploader\n email = Email('successful_upload')\n dsc_url = pylons.config[\n 'debexpo.server'] + '/debian/' + self.changes.get_pool_path() + '/' + self.changes.get_dsc()\n rfs_url = pylons.config['debexpo.server'] + url('rfs', packagename=self.changes['Source'])\n self.send_email(email, [self.user.email], package=self.changes['Source'],\n dsc_url=dsc_url, rfs_url=rfs_url)",
"def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry",
"def create_entry(cls, title, date, timeSpent, learned, resources):\n try:\n with DATABASE.transaction():\n cls.create(\n title=title,\n date=date,\n timeSpent=timeSpent,\n learned=learned,\n resources=resources\n )\n except IntegrityError:\n raise ValueError(\"Entry already exists\")",
"def _add_new_struct_to_existing_database(self, db, filename):\n\n pass",
"def create():\n\tcreate_db()",
"def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)",
"def main(store, args, _config, _env):\n tsk = doto.model.task.Task(args.title, args.description)\n if args.due is not None:\n tsk.due = doto.cli.parser.date_parser(args.due)\n if args.repeat is not None:\n tsk.repeat = doto.model.repeat.parse(args.repeat, tsk.due, tsk.id)\n doto.model.repeat.add_new(store, tsk.repeat)\n\n if args.difficulty is not None:\n tsk.difficulty = args.difficulty\n try:\n doto.model.task.add_new(store, tsk)\n store.save()\n except:\n print(\"It was not possible to save the new task. What are you doing Dave!\")\n return 4\n return 0",
"def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })",
"def create_db(self):",
"def load(cls):\n \n # Loop through problems and build patient problem lists:\n probs = csv.reader(file(PROBLEMS_FILE,'U'),dialect='excel-tab')\n header = probs.next() \n for prob in probs:\n cls(dict(zip(header,prob))) # Create a problem instance ",
"def create_new_lab(title):\n\n lab = Lab(title=title)\n db.session.add(lab)\n db.session.commit()\n\n return lab",
"def do_create(self, arg):\n\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n\n elif args[0] in models.classes:\n new_instance = models.classes[args[0]]()\n print(new_instance.id)\n \"\"\"saves it (to the JSON file) \"\"\"\n models.storage.save()\n\n else:\n print(\"** class doesn't exist **\")",
"def seed_movie_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\")\n\n #get data from split line\n id = int(data[0])\n release = data[2]\n url = data[4]\n\n #titles might have accented characters so test for this and decode\n #them if so\n title = data[1]\n try:\n title = unicode(title)\n except UnicodeError:\n title = title.decode(\"utf-8\")\n\n #if there's a date there, parse it\n if release:\n release = datetime.strptime(data[2], \"%d-%b-%Y\")\n #otherwise, set release to None so it will become NULL in the database\n else:\n release = None\n\n #create a new record and add it to the queue\n new_movie = Movie(movie_id=id, title=title, \n released_at=release, imdb_url=url)\n db.session.add(new_movie)\n\n #commit changes\n db.session.commit()",
"def create():",
"def create():",
"def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )",
"def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )"
]
| [
"0.6484389",
"0.6370863",
"0.59666836",
"0.5927601",
"0.58917224",
"0.5867778",
"0.5842167",
"0.58306783",
"0.5819347",
"0.5805411",
"0.578956",
"0.57660884",
"0.56141496",
"0.55846286",
"0.5555678",
"0.5531445",
"0.5467168",
"0.5451255",
"0.5420608",
"0.54112333",
"0.53974634",
"0.5387078",
"0.5372284",
"0.53713286",
"0.5361273",
"0.5353934",
"0.5310175",
"0.5310175",
"0.5285283",
"0.5285283"
]
| 0.6735829 | 0 |
Return a list of problems in the database. | def get_problems(self):
with self.__orm.session_scope() as session:
try:
problems = session.query(Problem.name).all()
return [problem[0] for problem in problems]
except NoResultFound:
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_problems():\n problems = list()\n solved = database.session.query(Submission).\\\n filter(Submission.username == current_user.username).\\\n filter(Submission.result == \"good\").\\\n all()\n solved_set = set()\n for solve in solved:\n solved_set.add(solve.pid)\n\n for problem in database.session.query(Problem).all():\n problems.append({\n 'pid': problem.pid,\n 'name': problem.name,\n 'shortname': problem.shortname,\n 'appeared': problem.appeared,\n 'difficulty': problem.difficulty,\n 'comp_release': problem.comp_release,\n 'added': problem.added,\n 'solved': problem.pid in solved_set,\n 'url': url_for_problem(problem)\n })\n return serve_response(problems)",
"def problems(self):\n return self.configuration.problems",
"def problem_list(self):\r\n return [{\r\n 'location': location, 'problem_name': name,\r\n 'num_graded': self.DUMMY_DATA['problem_list_num_graded'],\r\n 'num_pending': self.DUMMY_DATA['problem_list_num_pending'],\r\n 'num_required': self.DUMMY_DATA['problem_list_num_required']\r\n } for location, name in self.problems.items()\r\n ]",
"def _get_problem_list(self):\r\n self._success_response({'problem_list': self.server.problem_list})",
"def get_bugs(self):\n return self.execute(TABELLE['bugs']['select'])",
"def issues(db):\n db.session.query(Issue).delete()\n\n issues = [\n {\n 'label': 'login',\n 'email': '[email protected]',\n 'question': '42.',\n 'status': 'unread'\n },\n {\n 'label': 'login',\n 'email': '[email protected]',\n 'question': 'Hello.',\n 'status': 'unread'\n }\n ]\n\n for issue in issues:\n db.session.add(Issue(**issue))\n\n db.session.commit()\n\n return db",
"def get_reports(self):\r\n result = QtSql.QSqlQuery('''Select * FROM failures''')\r\n list = []\r\n while result.next():\r\n failure = Failure(unicode(result.value(0).toString()), # id\r\n unicode(result.value(1).toString()), # comment\r\n unicode(result.value(2).toString()), # indicator\r\n bool(result.value(3))) # release\r\n p = self.get_presentation(failure.talkId)\r\n r = Report(p, failure)\r\n list.append(r)\r\n return list",
"def _load_problems(statement_id) -> List[Problem]:\n problems_statement_problems = db.session.query(Problem, StatementProblem) \\\n .join(StatementProblem, StatementProblem.problem_id == Problem.id) \\\n .filter(StatementProblem.statement_id == statement_id) \\\n .filter(StatementProblem.hidden == 0) \\\n .options(Load(Problem).load_only('id', 'name')) \\\n .options(Load(StatementProblem).load_only('rank'))\n\n problems = []\n # Yes it is ugly but I think its better than rewrite query\n for problem, sp in problems_statement_problems.all():\n problem.rank = sp.rank\n problems.append(problem)\n\n return problems",
"def get_problem_list(self, course_id, user_id):\r\n return {'success': True, 'error': 'No problems found.'}",
"def all_exercises(self):\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Exercises(\n row[0], row[1]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select e.Name,\n e.Language\n from Exercise e\n \"\"\")\n\n all_exercises = db_cursor.fetchall()\n\n for exercise in all_exercises:\n print(exercise)",
"def issues(self):\r\n return issues.Issues(self)",
"def get_problem_list(request, course_id):\r\n assert(isinstance(course_id, basestring))\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n _check_access(request.user, course_key)\r\n try:\r\n response = staff_grading_service().get_problem_list(course_key, unique_id_for_user(request.user))\r\n\r\n # If 'problem_list' is in the response, then we got a list of problems from the ORA server.\r\n # If it is not, then ORA could not find any problems.\r\n if 'problem_list' in response:\r\n problem_list = response['problem_list']\r\n else:\r\n problem_list = []\r\n # Make an error messages to reflect that we could not find anything to grade.\r\n response['error'] = _(\r\n u'Cannot find any open response problems in this course. '\r\n u'Have you submitted answers to any open response assessment questions? '\r\n u'If not, please do so and return to this page.'\r\n )\r\n valid_problem_list = []\r\n for i in xrange(0,len(problem_list)):\r\n # Needed to ensure that the 'location' key can be accessed.\r\n try:\r\n problem_list[i] = json.loads(problem_list[i])\r\n except Exception:\r\n pass\r\n if does_location_exist(course_key.make_usage_key_from_deprecated_string(problem_list[i]['location'])):\r\n valid_problem_list.append(problem_list[i])\r\n response['problem_list'] = valid_problem_list\r\n response = json.dumps(response)\r\n\r\n return HttpResponse(response,\r\n mimetype=\"application/json\")\r\n except GradingServiceError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"Error from staff grading service in open \"\r\n \"ended grading. server url: {0}\".format(staff_grading_service().url)\r\n )\r\n #This is a staff_facing_error\r\n return HttpResponse(json.dumps({'success': False,\r\n 'error': STAFF_ERROR_MESSAGE}))",
"def addProblems(self):\n if self.pid in Problem.problems: \n for prob in Problem.problems[self.pid]:\n subs = {'end': {'end': '2010-09-13'}}\n self._set_default_attrs(prob, subs)\n prob_string = PROBLEM.sub({\n 'onset':prob.start,\n 'resolution':prob.end,\n 'snomed':prob.snomed, \n 'name':prob.name\n }).done()\n self.data.append(SDMX.sub({'models':prob_string}, escape=False).done())",
"def all_exercises(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Exercise(row [1], row [2])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select e.id,\n e.name,\n e.language\n \n from exercises e\n order by e.language\n \"\"\")\n\n all_exercises = db_cursor.fetchall()\n print('\\n***All Exercises***')\n for exercise in all_exercises:\n print(exercise)",
"def problem(self, identifier):\n return self._get(\"problems/%d\" % identifier).json()",
"def issues(self):\r\n return issues.RepoIssues(self)",
"def issues(self):\r\n return issues.RepoIssues(self)",
"def all_pythonExercises(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: PythonExercises(row[0], row[1])\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT [Language], [Name]\n FROM Exercise\n WHERE [Language] IN (\"Python\");\n \"\"\")\n\n all_pythonExercises = db_cursor.fetchall()\n for exercise in all_pythonExercises:\n print(exercise)",
"def test_get_problem_list(self):\r\n # Initialize a StudentProblemList object.\r\n student_problem_list = utils.StudentProblemList(self.course.id, unique_id_for_user(self.user))\r\n # Get the initial problem list from ORA.\r\n success = student_problem_list.fetch_from_grading_service()\r\n # Should be successful, and we should have three problems. See mock class for details.\r\n self.assertTrue(success)\r\n self.assertEqual(len(student_problem_list.problem_list), 3)\r\n\r\n # See if the problem locations are valid.\r\n valid_problems = student_problem_list.add_problem_data(reverse('courses'))\r\n # One location is invalid, so we should now have two.\r\n self.assertEqual(len(valid_problems), 2)\r\n # Ensure that human names are being set properly.\r\n self.assertEqual(valid_problems[0]['grader_type_display_name'], \"Instructor Assessment\")",
"def test_problem_list(self):\r\n request = Mock(user=self.user)\r\n response = views.student_problem_list(request, self.course.id.to_deprecated_string())\r\n self.assertRegexpMatches(response.content, \"Here is a list of open ended problems for this course.\")",
"def problem_list(self):\r\n return self.q(css='a.problem-button').text",
"def bugs(self,active_only=True,name_only=False):\n\n q=\"SELECT {},{},{},{},{},{},{} FROM {} \".format(\n BugDB.NAME_COLUMN,\n BugDB.STEPS_COLUMN,\n BugDB.XB_COLUMN,\n BugDB.OB_COLUMN,\n BugDB.ASS_COLUMN,\n BugDB.CREATED_DATE_COLUMN,\n BugDB.FIXED_COLUMN,\n BugDB.BUG_TABLE,\n\n )\n params=[]\n if active_only:\n q+=\"\"\"\n WHERE {} IS NOT ?\n\"\"\".format(BugDB.FIXED_COLUMN)\n \n params.append(1)\n q+=\" ORDER BY ROWID\"\n with self.cxn:\n cur=self.cxn.cursor()\n for row in cur.execute(q,params): \n name=row[\"bug_name\"]\n if name_only:\n yield name\n else:\n bug={}\n for k in row.keys():\n bug[k]=row[k]\n yield bug",
"def display_problems():\n\n res = choose_problems()\n\n cc_name1 = res[0][0]\n url_link1 = res[0][1]\n cc_name2 = res[1][0]\n url_link2 = res[1][1]\n cc_name3 = res[2][0]\n url_link3 = res[2][1]\n\n #TODO: implement datetime (i.e. \"11.07.21\")\n print('Weekly Wednesday Problems')\n print(f'Problem 1: {cc_name1} - {url_link1}')\n print(f'Problem 2: {cc_name2} - {url_link2}')\n print(f'Problem 3: {cc_name3} - {url_link3}')\n\n return cc_name1, url_link1, cc_name2, url_link2, cc_name3, url_link3",
"def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)",
"def get_problem(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/problem/{id}\")",
"def get_all_from_file(cls, fileID, problemDir):\n\n # Load the module\n if problemDir not in sys.path:\n sys.path.insert(0, problemDir)\n try:\n problemModule = __import__(fileID)\n except Exception as e: # pragma: no cover\n warn(\"Could not import file \" + fileID)\n print(e)\n return []\n\n foundProblems = [] # Holds the TestProblems we find in this file\n\n # Look for a dictionary\n PROBLEM_LIST = [\"problems\"]\n for problemList in PROBLEM_LIST:\n if problemList in [name for name in dir(problemModule)]:\n problems = getattr(problemModule, \"problems\")\n for problemDict in problems:\n foundProblems.append(cls.process_problem_dict(**problemDict))\n if len(foundProblems) == 0: # pragma: no cover\n warn(fileID + \" contains no problem objects.\")\n return foundProblems",
"def iter_problems(self, engine, ifname: str) -> Iterable[ProblemItem]:\n # Check filter query, if any\n if self.filter_query:\n if not self.filter_query.any(engine, ifname=ifname):\n return\n # Process rules\n for rule in self.rules:\n if not rule.is_active:\n continue\n if rule.filter_query:\n if not rule.filter_query.any(engine, ifname=ifname):\n continue\n for ctx in rule.query.query(engine, ifname=ifname, **rule.query_params):\n if \"error\" in ctx:\n tpl = Template(rule.error_text_template)\n path = [ifname]\n if rule.error_code:\n path += [rule.error_code]\n yield ProblemItem(\n alarm_class=rule.alarm_class.name if rule.alarm_class else None,\n path=path,\n message=tpl.render(ctx),\n code=rule.error_code or None,\n vars={\"interface\": ifname},\n )\n if rule.is_fatal:\n return",
"def get_problem_list(self, course_id, grader_id):\r\n params = {'course_id': course_id.to_deprecated_string(), 'grader_id': grader_id}\r\n result = self.get(self.get_problem_list_url, params)\r\n tags = [u'course_id:{}'.format(course_id)]\r\n self._record_result('get_problem_list', result, tags)\r\n dog_stats_api.histogram(\r\n self._metric_name('get_problem_list.result.length'),\r\n len(result.get('problem_list', []))\r\n )\r\n return result",
"def list_unique_problems(arn=None, nextToken=None):\n pass",
"def get_all_labs():\n return Lab.query.all()"
]
| [
"0.69904286",
"0.689693",
"0.6717712",
"0.6546775",
"0.6248936",
"0.61356276",
"0.60820484",
"0.6078774",
"0.6023992",
"0.5908121",
"0.58521813",
"0.5791114",
"0.5778342",
"0.5670173",
"0.5617919",
"0.5605232",
"0.5605232",
"0.5576442",
"0.5553699",
"0.5512389",
"0.5427262",
"0.5409918",
"0.536917",
"0.534544",
"0.53277975",
"0.5320244",
"0.53062624",
"0.52851266",
"0.52809393",
"0.52648324"
]
| 0.7453994 | 0 |
Load dataset for given problem with given split. | def load_dataset(self, problem_name="", split="train"):
orm = self.__orm
username = "admin" # should be unused (unless submit new feature to db)
with orm.session_scope() as session:
if not problem_name:
problem_name = session.query(Problem.name)\
.filter(Problem.name != "demo").scalar()
problem_id = session.query(Problem.id)\
.filter(Problem.name == problem_name).scalar()
data_dir = os.path.join("/data", split)
dataset, entities_featurized, target = load_dataset_from_dir(
session, data_dir, problem_name)
suffix = "_" + split
return problem_name, dataset, entities_featurized, target | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_data(self,split='train'):\n raise ValueError('Please implement me!')",
"def load_data(self,split='train'):\n raise NotImplementedError",
"def load_dataset(self, split='train'):\n path = self.args.data\n if not os.path.exists(path):\n raise FileNotFoundError(\n \"Dataset not found: ({})\".format(path)\n )\n\n files = os.listdir(path) if os.path.isdir(path) else [path]\n files = [f for f in files if split in f]\n assert len(files) > 0\n\n self.datasets[split] = CombineBertData(files)\n\n\n \"\"\"\n dataset = data_utils.load_indexed_dataset(\n split_path, self.dictionary, self.args.dataset_impl, combine=combine\n )\n if dataset is None:\n raise FileNotFoundError(\n \"Dataset not found: {} ({})\".format(split, split_path)\n )\n\n dataset = TokenBlockDataset(\n dataset,\n dataset.sizes,\n self.args.tokens_per_sample,\n pad=self.dictionary.pad(),\n eos=self.dictionary.eos(),\n break_mode=self.args.sample_break_mode,\n include_targets=True,\n )\n\n add_eos_for_other_targets = (\n self.args.sample_break_mode is not None\n and self.args.sample_break_mode != \"none\"\n )\n\n self.datasets[split] = MonolingualDataset(\n dataset,\n dataset.sizes,\n self.dictionary,\n self.output_dictionary,\n add_eos_for_other_targets=add_eos_for_other_targets,\n shuffle=True,\n targets=self.targets,\n add_bos_token=self.args.add_bos_token,\n )\n \"\"\"",
"def load_dataset(data_dir, dataset_split, logger=None):\n current_dir = Path()\n dir_path = current_dir / \"data\" / \"break_data\" / \"preprocessed\"\n file_name = \"dataset_preprocessed_\" + dataset_split + \".pkl\"\n if not (dir_path / file_name).is_file():\n # Download and preprocess the BREAK dataset (logical form and lexicon), and save the preprocessed data.\n if logger:\n logger.info('Downloading and preparing datasets...')\n dataset_logical = load_dataset('break_data', dataset_split, cache_dir=data_dir)\n save_obj(dir_path, dataset_logical, file_name)\n\n # Load the saved preprocessed data.\n dataset = load_obj(dir_path, file_name)\n return dataset",
"def load_dataset(self, split, combine=False, **kwargs):\r\n data_json_path = os.path.join(self.args.data, \"{}.json\".format(split))\r\n self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)",
"def load(split: str) -> tf.data.Dataset:\n\n def _parse_single_example(example):\n feature_description = {\n \"label\": tf.io.FixedLenFeature([], tf.int64),\n \"image\": tf.io.FixedLenFeature([], tf.string),\n }\n features = tf.io.parse_single_example(example, feature_description)\n img = tf.io.decode_jpeg(features[\"image\"], channels=3)\n img = tf.reshape(img, [960, 1280, 3]) # For shape inference.\n features[\"image\"] = img\n return features\n\n split = split.lower()\n assert split in (\"train\", \"test\"), f\"Invalid split: {split}\"\n\n base_dir = pathlib.Path(__file__).parent / \"trafficsigns\"\n filename = \"set1.tfrecords\" if split == \"train\" else \"set2.tfrecords\"\n path = os.path.join(base_dir, filename)\n\n data = tf.data.TFRecordDataset(path)\n return data.map(_parse_single_example,\n num_parallel_calls=tf.data.experimental.AUTOTUNE).cache()",
"def _init_dataset(self, data_config, split='train'):\n assert split in {'train', 'valid'}\n\n # load datasets\n print(f'Load {split} dataset')\n if data_config['type'] == 'npy':\n dataset = MSDMelDataset(\n data_config['mel_root'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'], on_mem=data_config['on_mem'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'hdf':\n dataset = MSDMelHDFDataset(\n data_config['hdf_fn'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'audio':\n dataset = MSDAudioDataset(\n data_config['audio_root'], data_config[f'{split}_tids_fn'],\n data_config['tid2path_fn'], data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n device='cpu',\n transform=ToVariable())\n\n return dataset",
"def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n paths = utils.split_paths(self.cfg.data)\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n src, tgt = self.cfg.source_lang, self.cfg.target_lang\n\n self.datasets[split] = load_langpair_dataset(\n data_path,\n split,\n src,\n self.src_dict,\n tgt,\n self.tgt_dict,\n combine=combine,\n dataset_impl=self.cfg.dataset_impl,\n upsample_primary=self.cfg.upsample_primary,\n left_pad_source=self.cfg.left_pad_source,\n left_pad_target=self.cfg.left_pad_target,\n max_source_positions=self.cfg.max_source_positions,\n max_target_positions=self.cfg.max_target_positions,\n truncate_source=self.cfg.truncate_source,\n )",
"def load_dataset(split_ratio, save_root_dir):\n # Set the processed data directories\n train_ct, train_label_map, test_ct, test_label_map = train_test_split(split_ratio=split_ratio,\n save_root_dir=save_root_dir)\n\n train_dataset = train_label_map, train_ct\n test_dataset = test_label_map, test_ct\n return train_dataset, test_dataset",
"def get_dataset(name, split, data_dir=\"~/tensorflow_datasets\"):\n assert split in [\"train\", \"train+validation\", \"validation\", \"test\"]\n dataset, info = tfds.load(name, split=split, data_dir=data_dir, with_info=True)\n return dataset, info",
"def get_dataset(name, split, data_dir=\"~/tensorflow_datasets\"):\n\n data_dir = os.path.join(os.getcwd(),'data/VOC')\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n assert split in [\"train\", \"train+validation\", \"validation\", \"test\"]\n\n dataset, info = tfds.load(name, split=split, data_dir=data_dir, with_info=True)\n return dataset, info",
"def load_dataset(\n self,\n split: str,\n epoch: int = 1,\n combine: bool = False,\n task_cfg: DictConfig = None,\n **kwargs,\n ):\n paths = utils.split_paths(self.cfg.data)\n assert len(paths) > 0\n if split != self.cfg.train_subset:\n # if not training data set, use the first shard for valid and test\n paths = paths[:1]\n data_path = paths[(epoch - 1) % len(paths)]\n task_cfg = task_cfg or self.cfg\n\n self.datasets[split] = get_asr_dataset_from_json(\n data_path,\n split,\n self.dictionary,\n combine=combine,\n upsample_primary=self.cfg.upsample_primary,\n num_buckets=self.cfg.num_batch_buckets,\n shuffle=(split != self.cfg.gen_subset),\n pad_to_multiple=self.cfg.required_seq_len_multiple,\n is_training_set=(split == self.cfg.train_subset),\n lf_mmi=(self.cfg.criterion_name == \"lattice_free_mmi\"),\n seed=self.cfg.seed,\n global_cmvn_stats_path=self.cfg.global_cmvn_stats_path,\n specaugment_config=self.cfg.specaugment_config,\n chunk_width=None\n if self.training_stage and split in self.cfg.valid_subset.split(\",\")\n else self.chunk_width,\n chunk_left_context=self.chunk_left_context,\n chunk_right_context=self.chunk_right_context,\n label_delay=self.label_delay,\n )",
"def load_data(self,split='train'):\n return load_arrow_data(self.config,split)",
"def load_dataset(self, split, epoch=0, combine=False, **kwargs):\n paths = self.args.data.split(':')\n assert len(paths) > 0\n data_path = paths[epoch % len(paths)]\n\n # infer langcode\n src, tgt = self.args.source_lang, self.args.target_lang\n if not hasattr(self.args, \"source_tau\"): self.args.source_tau = -1\n if not hasattr(self.args, \"target_tau\"): self.args.target_tau = -1\n\n if not hasattr(self.args, 'source_tau'): self.args.source_tau = -1\n if not hasattr(self.args, 'target_tau'): self.args.target_tau = -1\n\n if self.args.main_src_wordfreq is not None and self.args.dialect_src_wordfreq is not None:\n def word_idx_from_file(filename):\n idx = []\n with open(filename, 'r') as myfile:\n for line in myfile:\n idx.append(self.src_dict.index(line.split()[0]))\n return idx\n self.main_src_word_idx = word_idx_from_file(self.args.main_src_wordfreq) \n self.dialect_src_word_idx = word_idx_from_file(self.args.dialect_src_wordfreq)\n idx_to_sample_prob = []\n for i, src_word in enumerate(self.main_src_word_idx):\n if self.args.dialect_tau == -1:\n dialect_word_probs = np.array([1. for k in range(len(self.dialect_src_word_idx))])\n else:\n dialect_word_probs = np.array([-np.absolute(k-i) for k in range(len(self.dialect_src_word_idx))])\n idx_to_sample_prob.append(dialect_word_probs)\n #self.idx_to_sample_prob = scipy.special.softmax(np.array(self.idx_to_sample_prob)*0.01, axis=1)\n idx_to_sample_prob = scipy.special.softmax(np.array(idx_to_sample_prob)*self.args.dialect_tau, axis=1)\n print(idx_to_sample_prob)\n self.idx_to_sample_prob = {}\n for i, src_word in enumerate(self.main_src_word_idx):\n self.idx_to_sample_prob[src_word] = idx_to_sample_prob[i]\n pass_item = (self.idx_to_sample_prob, self.dialect_src_word_idx)\n else:\n pass_item = None\n if split != 'train':\n src_tau = -1 \n tgt_tau = -1\n mlm = None\n idx_to_src_gradnorm = None\n else: \n src_tau = self.args.source_tau \n tgt_tau = self.args.target_tau \n mlm = self.mlm\n idx_to_src_gradnorm = self.idx_to_src_gradnorm\n\n self.datasets[split] = load_langpair_dataset(\n data_path, split, src, self.src_dict, tgt, self.tgt_dict,\n combine=combine, dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=self.args.max_source_positions,\n max_target_positions=self.args.max_target_positions,\n src_tag=self.args.src_tag, tgt_tag=self.args.tgt_tag,\n src_tau=src_tau, tgt_tau=tgt_tau,\n epoch=epoch,\n id_to_sample_probabilities=pass_item,\n lm=mlm,\n idx_to_src_gradnorm=idx_to_src_gradnorm,\n )",
"def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass",
"def load_dataset(self, split, combine=False, **kwargs):\n\n def get_path(type, split):\n return os.path.join(self.args.data, type, split)\n\n def make_dataset(type, dictionary):\n split_path = get_path(type, split)\n\n dataset = data_utils.load_indexed_dataset(\n split_path,\n dictionary,\n self.args.dataset_impl,\n combine=combine,\n )\n assert dataset is not None, \"could not find dataset: {}\".format(\n get_path(type, split))\n return dataset\n\n src_tokens = make_dataset(\"input0\", self.source_dictionary)\n pos_tokens = make_dataset(\"input1\", self.pos_dictionary)\n\n with data_utils.numpy_seed(self.args.seed):\n shuffle = np.random.permutation(len(src_tokens))\n\n label0_dataset = make_dataset(\"label0\", self.label0_dictionary)\n label1_dataset = make_dataset(\"label1\", self.label1_dictionary)\n\n dataset = {\n \"id\": IdDataset(),\n \"net_input\": {\n \"src_tokens\": RightPadDataset(\n src_tokens,\n pad_idx=self.source_dictionary.pad(),\n pad_to_length=self._max_positions,\n ),\n \"src_lengths\": NumelDataset(src_tokens, reduce=False),\n },\n \"segments\": {\n \"seg_tokens\": RightPadDataset(\n pos_tokens,\n pad_idx=self.pos_dictionary.pad(),\n pad_to_length=self._max_positions,\n ),\n \"seg_lengths\": NumelDataset(pos_tokens, reduce=False),\n },\n \"target0\": RightPadDataset( # use 1 as padding, will be used to mask out padding when calculating loss\n ReplaceDataset( # replace eos and existing padding (used when some tokens should not be predicted) with -1\n OffsetTokensDataset( # offset tokens to get the targets to the correct range (0,1,2,...)\n label0_dataset,\n offset=-self.label0_dictionary.nspecial,\n ),\n replace_map={\n self.label0_dictionary.eos()\n - self.label0_dictionary.nspecial: -1,\n self.label0_dictionary.pad()\n - self.label0_dictionary.nspecial: -1,\n },\n offsets=np.zeros(len(label0_dataset), dtype=np.int),\n ),\n pad_idx=-1,\n pad_to_length=self._max_positions,\n ),\n \"target1\": RightPadDataset( # use 1 as padding, will be used to mask out padding when calculating loss\n ReplaceDataset( # replace eos and existing padding (used when some tokens should not be predicted) with -1\n OffsetTokensDataset( # offset tokens to get the targets to the correct range (0,1,2,...)\n label1_dataset,\n offset=-self.label1_dictionary.nspecial,\n ),\n replace_map={\n self.label1_dictionary.eos()\n - self.label1_dictionary.nspecial: -1,\n self.label1_dictionary.pad()\n - self.label1_dictionary.nspecial: -1,\n },\n offsets=np.zeros(len(label1_dataset), dtype=np.int),\n ),\n pad_idx=-1,\n pad_to_length=self._max_positions,\n ),\n \"nsentences\": NumSamplesDataset(),\n \"ntokens\": NumelDataset(src_tokens, reduce=True),\n }\n\n nested_dataset = NestedDictionaryDataset(\n dataset,\n sizes=[src_tokens.sizes],\n )\n\n if self.args.no_shuffle:\n dataset = nested_dataset\n else:\n dataset = SortDataset(\n nested_dataset,\n # shuffle\n sort_order=[shuffle],\n )\n logger.info(\"Loaded {0} with #samples: {1}\".format(split, len(dataset)))\n self.datasets[split] = dataset\n return self.datasets[split]",
"def get_dataset(dataset: str, split: str) -> Dataset:\n if dataset == \"imagenet\":\n return _imagenet(split)\n elif dataset == \"imagenet32\":\n return _imagenet32(split)\n elif dataset == \"cifar10\":\n return _cifar10(split)",
"def load_data(self, task):\n params = self.params\n data = {splt: {} for splt in ['train', 'valid', 'test']}\n dpath = os.path.join(params.data_path, 'eval', task)\n\n self.n_sent = 1 if task in ['SST-2', 'CoLA'] else 2\n\n for splt in ['train', 'valid', 'test']:\n\n # load data and dictionary\n data1 = load_binarized(os.path.join(dpath, '%s.s1.pth' % splt), params)\n data2 = load_binarized(os.path.join(dpath, '%s.s2.pth' % splt), params) if self.n_sent == 2 else None\n data['dico'] = data.get('dico', data1['dico'])\n\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n if self.n_sent == 2:\n set_dico_parameters(params, data, data2['dico'])\n\n # create dataset\n if self.n_sent == 1:\n data[splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n else:\n data[splt]['x'] = ParallelDataset(\n data1['sentences'], data1['positions'],\n data2['sentences'], data2['positions'],\n params\n )\n\n # load labels\n if splt != 'test' or task in ['MRPC']:\n # read labels from file\n with open(os.path.join(dpath, '%s.label' % splt), 'r') as f:\n lines = [l.rstrip() for l in f]\n # STS-B task\n if task == 'STS-B':\n assert all(0 <= float(x) <= 5 for x in lines)\n y = [float(l) for l in lines]\n # QQP\n elif task == 'QQP':\n UNK_LABEL = 0\n lab2id = {x: i for i, x in enumerate(sorted(set(lines) - set([''])))}\n y = [lab2id.get(x, UNK_LABEL) for x in lines]\n # other tasks\n else:\n lab2id = {x: i for i, x in enumerate(sorted(set(lines)))}\n y = [lab2id[x] for x in lines]\n data[splt]['y'] = torch.LongTensor(y)\n assert len(data[splt]['x']) == len(data[splt]['y'])\n\n # compute weights for weighted training\n if task != 'STS-B' and params.weighted_training:\n weights = torch.FloatTensor([\n 1.0 / (data['train']['y'] == i).sum().item()\n for i in range(len(lab2id))\n ]).npu()\n self.weights = weights / weights.sum()\n else:\n self.weights = None\n\n return data",
"def get_loader(split):\n assert split in ['train', 'val', 'trainval', 'test']\n image_feature_path = config.rcnn_trainval_path if split != 'test' else config.rcnn_test_path\n dataset = VQAFeatureDataset(\n split,\n image_feature_path,\n )\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=512,\n shuffle=True if split not in ['val', 'test'] else False, # only shuffle the data in training\n pin_memory=True,\n num_workers=config.workers,\n )\n return loader",
"def _read_examples(self, split: base.Split) -> tf.data.Dataset:\n if split == base.Split.TEST:\n return tf.data.Dataset.range(self._num_test_examples)\n if split == base.Split.TRAIN:\n return tf.data.Dataset.range(self._num_train_examples)\n if split == base.Split.VAL:\n return tf.data.Dataset.range(self._num_validation_examples)",
"def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n paths = self.args.data.split(':')\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n \n lg_datasets = []\n for lg in self.gt_langs:\n src, tgt = lg, lg \n bos_id = self.tgt_dict.index('[{}]'.format(lg))\n data_path_lg = os.path.join(data_path, lg)\n dataset = load_generation_pair_dataset(\n data_path_lg, split, tgt, self.src_dict, self.tgt_dict,\n combine=combine, dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=getattr(self.args, 'max_source_positions', 1024),\n max_target_positions=getattr(self.args, 'max_target_positions', 1024),\n load_alignments=self.args.load_alignments,\n prepend_bos=getattr(self.args, 'preprend_bos', False),\n append_source_id=True,\n common_eos=self.args.common_eos,\n lg_id=bos_id\n )\n lg_datasets.append(dataset)\n \n dataset_lengths = np.array([len(d) for d in lg_datasets], dtype=float) \n\n sample_probs = self._get_sample_prob(dataset_lengths)\n logger.info(\"| Sample probability by language: \", {\n lang: \"{0:.4f}\".format(sample_probs[id])\n for id, lang in enumerate(self.gt_langs)\n }\n )\n size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths\n logger.info(\"| Up/Down Sampling ratio by language: \", {\n lang: \"{0:.2f}\".format(size_ratio[id])\n for id, lang in enumerate(self.gt_langs)\n }\n )\n if split == getattr(self.args, \"train_subset\", \"train\"):\n resampled_lang_datasets = [\n ResamplingDataset(\n lg_datasets[i],\n size_ratio=size_ratio[i],\n seed=self.args.seed,\n epoch=epoch,\n replace=size_ratio[i] >= 1.0,\n )\n for i, d in enumerate(lg_datasets)\n ]\n dataset = ConcatDataset(\n resampled_lang_datasets,\n )\n else:\n dataset = ConcatDataset(lg_datasets)\n lang_splits = [split]\n for lang_id, lang_dataset in enumerate(lg_datasets):\n split_name = split + '_' + self.gt_langs[lang_id]\n lang_splits.append(split_name)\n self.datasets[split_name] = lang_dataset\n \n if hasattr(self.args, \"valid_subset\"):\n if split in self.args.valid_subset:\n self.args.valid_subset = self.args.valid_subset.replace(\n split, ','.join(lang_splits)\n )\n\n with data_utils.numpy_seed(self.args.seed + epoch):\n shuffle = np.random.permutation(len(dataset))\n self.datasets[split] = SortDataset(\n dataset,\n sort_order=[\n shuffle,\n dataset.sizes,\n ],\n )",
"def _load_dataset(self, split, align, partition):\n\n if partition == 'all':\n self._image_list = self._face.image_list + self._clothes.image_list\n celeba_num = self._face.num_images\n deepfashion_num = self._clothes.num_images\n elif partition == 'face':\n self._image_list = self._face.image_list\n celeba_num = self._face.num_images\n deepfashion_num = 0\n elif partition == 'clothes':\n self._image_list = self._clothes.image_list\n celeba_num = 0\n deepfashion_num = self._clothes.num_images\n\n self._gtdb = {'attr': -1.*np.ones((self.num_images, self.num_classes), dtype=np.float64)}\n\n # load labels for celeba images if they are included. \n if celeba_num > 0:\n self._gtdb['attr'][:celeba_num, self._face_class_idx] = self._face.gtdb['attr']\n # load soft labels for clothes attributes on celeba\n if align:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'_align.pkl')\n else:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'.pkl') \n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][:celeba_num, self._clothes_class_idx] = labels\n else:\n 'Dataset {}: Labels for clothes attributes on CelebA are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for clothes attributes on CelebA are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)\n\n # load labels for deepfashion images if they are included.\n if deepfashion_num > 0:\n self._gtdb['attr'][celeba_num:, self._clothes_class_idx] = self._clothes.gtdb['attr']\n # load soft labels for face attributes on deepfashion\n fn = osp.join(self.data_path, 'person_'+'clothes'+'_'+split+'.pkl')\n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][celeba_num:, self._face_class_idx] = labels\n else:\n 'Dataset {}: Labels for face attributes on Deepfashion are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for face attributes on Deepfashion are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)",
"def load_data(filename, split = 0.9):\n (black, white, player, move, ko) = read_file(filename)\n dataset = zip(black, white, player, move, ko)\n train_data = dataset[:int(math.ceil(split*len(dataset)))]\n test_data = dataset[int(math.ceil(split*len(dataset))):]\n return train_data, test_data",
"def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))",
"def as_dataset(self, split, shuffle_files=None):\n return self._as_dataset(split=split, shuffle_files=shuffle_files)",
"def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)",
"def split_datasets(ql_file, nl_question_file, output_dir, split):\n\n with io.open(ql_file, encoding='utf-8') as query_org, \\\n io.open(nl_question_file, encoding='utf8') as nl_org:\n ql = query_org.readlines()\n nl = nl_org.readlines()\n\n split = split / 100\n\n train_ql, val_ql, train_nl, val_nl = train_test_split(ql, nl,\n train_size=split,\n random_state=42,\n shuffle=True)\n\n with io.open(output_dir + \"-train.ql\", 'w', encoding='utf-8') \\\n as ql_train, \\\n io.open(output_dir + \"-val.ql\", 'w', encoding='utf-8') \\\n as ql_val, \\\n io.open(output_dir + \"-train.nl\", 'w', encoding='utf-8') \\\n as nl_train, \\\n io.open(output_dir + \"-val.nl\", 'w', encoding='utf-8') \\\n as nl_val:\n ql_train.writelines(train_ql)\n ql_val.writelines(val_ql)\n nl_train.writelines(train_nl)\n nl_val.writelines(val_nl)",
"def load_split(self, config):\n data_split_str = config[\"data_split\"]\n split_paras = {}\n split_paras[\"test_rate\"] = config[\"test_rate\"] if \"test_rate\" in config else 0.1\n split_paras[\"random\"] = config[\"random\"] if \"random\" in config else False\n split_paras[\"download\"] = config[\"download\"] if \"download\" in config else False\n split_paras[\"n_negative\"] = (\n config[\"n_negative\"] if \"n_negative\" in config else 100\n )\n split_paras[\"by_user\"] = config[\"by_user\"] if \"by_user\" in config else False\n split_paras[\"n_test\"] = config[\"n_test\"] if \"n_test\" in config else 10\n\n if split_paras[\"n_negative\"] < 0 and split_paras[\"n_test\"] > 1:\n # n_negative < 0, validate and testing sets of splits will contain all the negative items.\n # There will be only one validata and one testing sets.\n split_paras[\"n_test\"] = 1\n\n data_split_mapping = {\n \"leave_one_out\": self.load_leave_one_out,\n \"leave_one_basket\": self.load_leave_one_basket,\n \"random_split\": self.load_random_split,\n \"random_basket_split\": self.load_random_basket_split,\n \"temporal\": self.load_temporal_split,\n \"temporal_basket\": self.load_temporal_basket_split,\n }\n\n split_para_mapping = {\n \"leave_one_out\": [\"random\", \"download\", \"n_negative\", \"n_test\"],\n \"leave_one_basket\": [\"random\", \"download\", \"n_negative\", \"n_test\"],\n \"random_split\": [\n \"test_rate\",\n \"download\",\n \"by_user\",\n \"n_negative\",\n \"n_test\",\n ],\n \"random_basket_split\": [\n \"test_rate\",\n \"download\",\n \"by_user\",\n \"n_negative\",\n \"n_test\",\n ],\n \"temporal\": [\"test_rate\", \"by_user\", \"download\", \"n_negative\", \"n_test\"],\n \"temporal_basket\": [\n \"test_rate\",\n \"download\",\n \"by_user\",\n \"n_negative\",\n \"n_test\",\n ],\n }\n para_dic = {\n split_para_key: split_paras[split_para_key]\n if split_para_key in split_paras\n else None\n for split_para_key in split_para_mapping[data_split_str]\n }\n train_data, valid_data, test_data = data_split_mapping[data_split_str](\n **para_dic\n )\n return train_data, valid_data, test_data",
"def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)",
"def load_domain_split_dataset(self, data_dir, logger=None):\n current_dir = Path()\n dir_path = current_dir / \"data\" / \"break_data\" / \"preprocessed\"\n file_name = \"dataset_preprocessed_domain_split.pkl\"\n if not (dir_path / file_name).is_file():\n if logger:\n logger.info('Creating domain split dataset...')\n text_domain_dataset_prefixes = ('COMQA', 'CWQ', 'DROP', 'HOTP')\n image_domain_dataset_prefixes = ('CLEVR', 'NLVR2')\n DB_domain_dataset_prefixes = ('ACADEMIC', 'ATIS', 'GEO', 'SPIDER')\n image_plus_DB = image_domain_dataset_prefixes + DB_domain_dataset_prefixes\n train_filtererd = pd.DataFrame()\n validation_filtererd = pd.DataFrame()\n test_filtererd = pd.DataFrame()\n\n for i, example in enumerate(self.dataset_logical['train']):\n if example['question_id'].startswith(text_domain_dataset_prefixes):\n train_filtererd = train_filtererd.append(example, ignore_index=True)\n for i, example in enumerate(self.dataset_logical['validation']):\n if example['question_id'].startswith(image_plus_DB):\n validation_filtererd = validation_filtererd.append(example, ignore_index=True)\n for i, example in enumerate(self.dataset_logical['test']):\n if example['question_id'].startswith(image_plus_DB):\n test_filtererd = test_filtererd.append(example, ignore_index=True)\n\n # TODO delete this?\n # train_dataset = self.dataset_logical['train'].filter(\n # lambda example: example['question_id'].startswith(text_domain_dataset_prefixes))\n # validation_dataset = self.dataset_logical['validation'].filter(\n # lambda example: example['question_id'].startswith(image_plus_DB))\n # test_dataset = self.dataset_logical['test'].filter(\n # lambda example: example['question_id'].startswith(image_plus_DB))\n # train_filtererd_ds = Dataset.from_pandas(train_filtererd)\n to_save = {'train': Dataset.from_pandas(train_filtererd),\n 'validation': Dataset.from_pandas(validation_filtererd),\n 'test': Dataset.from_pandas(test_filtererd)}\n save_obj(dir_path, to_save, file_name)\n\n dataset = load_obj(dir_path, file_name)\n return dataset"
]
| [
"0.7480053",
"0.747074",
"0.720558",
"0.7184102",
"0.70533544",
"0.6940336",
"0.6938759",
"0.69116545",
"0.68739456",
"0.68407923",
"0.67328703",
"0.6732657",
"0.65436804",
"0.648422",
"0.6478639",
"0.6476277",
"0.64601445",
"0.64307505",
"0.63974446",
"0.6391252",
"0.6303175",
"0.6302331",
"0.62576413",
"0.6232183",
"0.61120105",
"0.61113626",
"0.6088615",
"0.60609585",
"0.60202295",
"0.60145015"
]
| 0.8008399 | 0 |
Get an approximated position of a vehicle at any point of time | def approximate_position(self, at_time: int) -> BasePosition:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getPosition(self, cur_time=timezone.now()):\n # Get waypoints\n if hasattr(self, 'preprocessed_waypoints'):\n waypoints = self.preprocessed_waypoints\n else:\n # Load waypoints for obstacle, filter for consecutive duplicates\n all_wpts = self.waypoints.order_by('order')\n waypoints = [\n all_wpts[i]\n for i in range(len(all_wpts))\n if i == 0 or all_wpts[i].distanceTo(all_wpts[i-1]) != 0]\n self.preprocessed_waypoints = waypoints\n\n # Waypoint counts of 0 or 1 can skip calc, so can no speed\n num_waypoints = len(waypoints)\n if num_waypoints == 0:\n return (0, 0, 0) # Undefined position\n elif num_waypoints == 1 or self.speed_avg <= 0:\n wpt = waypoints[0]\n return (wpt.position.gps_position.latitude,\n wpt.position.gps_position.longitude,\n wpt.position.altitude_msl)\n\n # Get spline representation\n if hasattr(self, 'preprocessed_spline_curve'):\n spline_curve = self.preprocessed_spline_curve\n else:\n spline_curve = self.getSplineCurve(waypoints)\n self.preprocessed_spline_curve = spline_curve\n (total_travel_time, spline_reps) = spline_curve\n\n # Sample spline at current time\n epoch_time = timezone.now().replace(\n year=1970, month=1, day=1, hour=0, minute=0, second=0,\n microsecond=0)\n cur_time_sec = (cur_time - epoch_time).total_seconds()\n cur_path_time = np.mod(cur_time_sec, total_travel_time)\n latitude = float(splev(cur_path_time, spline_reps[0]))\n longitude = float(splev(cur_path_time, spline_reps[1]))\n altitude_msl = float(splev(cur_path_time, spline_reps[2]))\n\n return (latitude, longitude, altitude_msl)",
"def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. 
Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, 
nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)",
"def OrbitPos(self, rv, t, m):\n \n params = np.array(rv)\n params = params.flatten()\n \n def GravityODE(rv,t):\n G = 6.67e-11\n m = 5.972e24\n x = rv[0]\n y = rv[1]\n vx = rv[2]\n vy = rv[3]\n \n dvydt = -((G*m*y)/((x**2+y**2)**(3/2)))\n dvxdt = -((G*m*x)/((x**2+y**2)**(3/2)))\n dxdt = vx\n dydt = vy\n\n pos_derivs = np.array([dxdt,dydt])\n v_deriv = np.array([dvxdt,dvydt])\n derivs = np.hstack((pos_derivs,v_deriv))\n \n return derivs \n \n satellite_orbit = integrate.odeint(GravityODE,params,t)\n \n return satellite_orbit[:,0],satellite_orbit[:,1]",
"def position(x,v,t,a):\n return x + v*t + 0.5*a*t**2 # pos = initial position + velocity*time + .5(acceleration)(time squared)",
"def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n x_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n for k in range(self.no_planets-1):\n r1=np.linalg.norm(r)\n r2=np.linalg.norm(r-self.positionFunction(t0)[:, k])\n r3=np.linalg.norm(r-self.positionFunction(t0)[:, k+1])\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n sys.exit(1)\n print \"My pos x:\", my_pos_x\n print \"My pos y:\", my_pos_y\n #return x1, y1, r1, x2, y2, r2, x3, y3, r3",
"def getGlobalPosition(self, s, ey):\n\n ### what is ey?? error in y coordinate of vehicle from the track inertial frame?\n\n # wrap s along the track\n while (s > self.TrackLength):\n s = s - self.TrackLength\n\n # Compute the segment in which system is evolving\n PointAndTangent = self.PointAndTangent\n\n index = np.all([[s >= PointAndTangent[:, 3]], [s < PointAndTangent[:, 3] + PointAndTangent[:, 4]]], axis=0)\n ## i = int(np.where(np.squeeze(index))[0])\n i = np.where(np.squeeze(index))[0]\n\n if PointAndTangent[i, 5] == 0.0: # If segment is a straight line\n # Extract the first final and initial point of the segment\n xf = PointAndTangent[i, 0]\n yf = PointAndTangent[i, 1]\n xs = PointAndTangent[i - 1, 0]\n ys = PointAndTangent[i - 1, 1]\n psi = PointAndTangent[i, 2]\n\n # Compute the segment length\n deltaL = PointAndTangent[i, 4]\n reltaL = s - PointAndTangent[i, 3]\n\n # Do the linear combination\n x = (1 - reltaL / deltaL) * xs + reltaL / deltaL * xf + ey * np.cos(psi + np.pi / 2)\n y = (1 - reltaL / deltaL) * ys + reltaL / deltaL * yf + ey * np.sin(psi + np.pi / 2)\n theta = psi\n else:\n r = 1 / PointAndTangent[i, 5] # Extract curvature\n ang = PointAndTangent[i - 1, 2] # Extract angle of the tangent at the initial point (i-1)\n # Compute the center of the arc\n if r >= 0:\n direction = 1\n else:\n direction = -1\n\n CenterX = PointAndTangent[i - 1, 0] \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = PointAndTangent[i - 1, 1] \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n\n spanAng = (s - PointAndTangent[i, 3]) / (np.pi * np.abs(r)) * np.pi\n\n angleNormal = wrap((direction * np.pi / 2 + ang))\n angle = -(np.pi - np.abs(angleNormal)) * (sign(angleNormal))\n\n x = CenterX + (np.abs(r) - direction * ey) * np.cos(\n angle + direction * spanAng) # x coordinate of the last point of the segment\n y = CenterY + (np.abs(r) - direction * ey) * np.sin(\n angle + direction * spanAng) # y coordinate of the last point of the segment\n theta = ang + direction * spanAng\n\n return x, y, theta",
"def distance_to_current_waypoint(vehicle):\n nextwaypoint = vehicle.commands.next\n if nextwaypoint==0:\n return None\n missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)\n return distancetopoint",
"def getCarLocation():\n cams = CAMS\n img1, img2 = cams[0].getImage(), cams[1].getImage()\n potint1op, point2op = GetBalloonOld.getCar(img1), GetBalloonOld.getCar(\n img2)\n for j in range(len(point1op)):\n point1op[j] = np.array(np.array([point1op[j][0], point1op[j][1]]))\n for j in range(len(point2op)):\n point2op[j] = np.array(np.array([point2op[j][0], point2op[j][1]]))\n if len(point1op) == 0 or len(point2op) == 0:\n return None\n points = [[point1op[0]], [point2op[0]]]\n car = getTargetsPlaces(copy.deepcopy(points))[0]\n diff = abs(car[2] - CAR_Z)\n for op1 in point1op:\n for op2 in point2op:\n testPoint = [[op1], [op2]]\n testCar = getTargetsPlaces(copy.deepcopy(testPoint))[0]\n testDiff = abs(testCar[2] - CAR_Z)\n if testDiff < diff:\n diff = testDiff\n car = testCar\n return car",
"def target_velocity(self, time):\n \"\"\"\n start_point = self.points[self.cur_start]\n cur_target = self.points[(self.cur_start + 1) % 4]\n total_time = self.total_time / 4\n avg_vel = (cur_target - start_point)/ total_time\n return avg_vel\n \"\"\"\n total_time = self.total_time\n if time <= self.total_time/4:\n return self.path1.target_velocity(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time <= self.total_time/2:\n return self.path2.target_velocity(time - (total_time/4 + 0.5))\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= self.total_time/4*3:\n return self.path3.target_velocity(time - (total_time/2 + 1))\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n\n else:\n return self.path4.target_velocity(time - (total_time/4*3 + 1.5))",
"def pos(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a0 + self.a1 * t + self.a2 * pow(t, 2) + self.a3 * pow(t, 3) + self.a4 * pow(t, 4) + self.a5 * pow(t, 5)",
"def calc_nearest_ind(self, robot_pose):\n pass",
"def CalculatePosition(radius,velocity,time,dt):\n \n # Initial conditions\n theta = 0\n xini = radius * np.cos(theta)\n yini = radius * np.sin(theta)\n t = 0\n \n # Store positions and time\n xposition = [xini]\n yposition = [yini]\n storedtime = [t]\n \n # Calculate positions\n while t < time:\n t += dt\n x = radius * np.cos((velocity/radius)*t)\n y = radius * np.sin((velocity/radius)*t)\n \n xposition.append(x)\n yposition.append(y)\n storedtime.append(t)\n \n # Make an array\n xposition = np.array(xposition)\n yposition = np.array(yposition)\n storedtime = np.array(storedtime)\n \n return xposition,yposition,storedtime",
"def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos",
"def getDynamicInterception(S, E, R, v_ball, v_robot):\n P = linePointProjection(S,E,R)\n B = np.linalg.norm(P-R)\n D = np.linalg.norm(P-S)\n # We have two equations:\n # 1. A^2 + B^2 = C^2\n # 2. (D-A) / v_ball = C / v_robot <- Same time to reach position\n # This leads to (with 'detailed steps')\n # A^2 + B^2 = (v_robot*(D-A)/v_ball)^2\n # We further note (v_robot/v_ball)^2 by VRB2\n # 2. A^2 + B^2 = VRB2 * (D^2 -2DA + A^2)\n # 3. (1- VRB2) A^2 + 2*D*A*VRB2 + (B^2 - D^2 VRB2)\n VRB2 = (v_robot/v_ball)**2\n a = 1 - VRB2\n b = 2 * D * VRB2\n c = B**2-D**2*VRB2\n potential_A = []\n if a == 0:\n if c == 0:\n potential_A = [0]\n else:\n # Simple case\n potential_A = [-c/b]\n else:\n # order 2 equation\n discriminant = b**2 - 4*a*c\n if discriminant < 0:\n return None\n potential_A.append((-b + np.sqrt(discriminant)) / (2*a))\n potential_A.append((-b - np.sqrt(discriminant)) / (2*a))\n kick_dist = np.linalg.norm(S-E)\n best_A = kick_dist + 10**-6\n for A in potential_A:\n # Interception can't occur before S or after E\n if A > D or (D-A) > kick_dist:\n continue\n # Since we want first valid interception in time, we want to minimize A\n if abs(A) < abs(best_A):\n best_A = A\n if abs(best_A) > kick_dist:\n return None\n return S + (E-S)/kick_dist * (D-best_A)",
"def acceleration(v,u,t):\n return ((v-u)/t)",
"def target_position(self, time):\n\n x_pos = self.r*sin(self.w*time)+self.ar_tag_pos[0]\n y_pos = self.r*cos(self.w*time)+self.ar_tag_pos[1]\n z_pos = self.ar_tag_pos[2]\n # print(x_pos,y_pos)\n # raise NotImplementedError\n return np.array([x_pos,y_pos,z_pos])",
"def target_position(self, time):\n \"\"\"\n start_pos = self.points[self.cur_start]\n seg_time = time - self.last_checkpoint_time\n\n #The arguement of target-velocity dosent matter\n cur_pos = self.target_velocity(time)*seg_time + start_pos\n\n \n # or time > (self.total_time / 4)*(self.cur_start + 1)\n cur_pos_norm = length(cur_pos - start_pos)\n\n next_corner = self.points[(self.cur_start + 1)%4]\n \n seg_norm = length(next_corner - start_pos)\n print(\"cur_pos : \", cur_pos, \"segment: \", self.cur_start, seg_norm - cur_pos_norm)\n\n if cur_pos_norm >= seg_norm:\n self.cur_start = (self.cur_start + 1) % 4\n self.last_checkpoint_time = time\n return cur_pos\n \"\"\"\n\n #Possibly use rospy.sleep()\n total_time = self.total_time\n\n\n if time < total_time/4:\n return self.path1.target_position(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time < total_time/2:\n return self.path2.target_position(time - (total_time/4 + 0.5))\n # return self.path2.target_position(time - (total_time/4 ))\n\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= total_time/4*3:\n return self.path3.target_position(time - (total_time/2 + 1))\n # return self.path3.target_position(time - (total_time/2))\n\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n else:\n return self.path4.target_position(time - (total_time/4*3 + 1.5))\n # return self.path4.target_position(time - (total_time/4*3))",
"def target_velocity(self, time):\n\n x_v = self.w*self.r*cos(self.w*time)\n y_v = -self.w*self.r*sin(self.w*time)\n z_v = 0\n # raise NotImplementedError\n return np.array([x_v,y_v,z_v])",
"def test_get_position():\n pos = get_position(date, lng, lat)\n assert np.isclose(pos['azimuth'], -2.5003175907168385)\n assert np.isclose(pos['altitude'], -0.7000406838781611)",
"def get_pos(self):\n current_angles = self.chain.get_current_values()\n current_xyz, current_rpy = self._solver.forward_solve(current_angles)\n return current_xyz, current_rpy",
"def position(t, x, y):\n return x * exp(-t * y) * sin(2 * pi * t)",
"def get_vsolar(self):\n return self.read_register(4098, 1, 3)",
"def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()",
"def FindClosestPoint(self, ):\n ...",
"def get_speed(vehicle):\n vel = vehicle.get_velocity()\n\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)",
"def _compute_solar_torque(self):\n pass",
"def getPosition(self, request, context): \n \n # The latitude and longitude are relative to the WGS84 coordinate system. \n # The altitude is relative to mean sea-level (MSL)\n globalLocation = str(self.vehicle.location.global_frame)\n \n # LocationGlobalRelative:lat=0.0,lon=0.0,alt=6.6\n # Altitude relative to the home location. The lat and long values for this and the above\n # variable will be the same.\n relativeLocation = str(self.vehicle.location.global_relative_frame)\n \n latitude, longitude, altGPS, altHome = None, None, None, None\n \n match = re.search(r\"lat=(.*),lon=(.*),alt=(.*)$\", globalLocation)\n if match:\n latitude = match.group(1)\n longitude = match.group(2)\n altGPS = match.group(3)\n \n match = re.search(r\"lat=(.*),lon=(.*),alt=(.*)$\", relativeLocation)\n if match:\n altHome = match.group(3)\n\t\t\t\n return droneconnect_pb2.Position(lat = float(latitude),\n lon = float(longitude),\n gpsAltitude = float(altGPS),\n relativeAltitude = float(altHome))",
"def advancePosition(self,time):\n velocity = self.getVelocity()\n return self.x + time*velocity",
"def current_pose_estimate(self):\n \n try:\n stamp = self._tf_listener.getLatestCommonTime(self._base_frame, self._map_frame)\n curr_pose = PoseStamped(header=Header(stamp=stamp, frame_id=self._base_frame))\n curr_pose = self._tf_listener.transformPose(self._map_frame, curr_pose)\n angles = tr.euler_from_quaternion([\n curr_pose.pose.orientation.x,\n curr_pose.pose.orientation.y,\n curr_pose.pose.orientation.z,\n curr_pose.pose.orientation.w])\n return Particle(curr_pose.pose.position.x, curr_pose.pose.position.y, angles[2],1)\n except (tf2.ExtrapolationException, tf2.LookupException, tf2.TransformException) as e:\n print(\"Robot pose estimate not ready yet: \", e.message)\n return Particle(0,0,0,1)",
"def estimateNextPosition(self, time):\n\t\tdeltaTime = (time - self.position[2]).total_seconds()\n\t\tself.getVelocity()\n\t\tmiddleOfFace = ((self.position[1][0]+self.position[0][0])/2,(self.position[1][1]+self.position[0][1])/2)\n\t\tif self.velocity != 0:\n\t\t\tdx = self.velocity[0]/self.velocity[2]*deltaTime\n\t\t\tdy = self.velocity[1]/self.velocity[2]*deltaTime\n\t\t\tmiddleOfFace = (middleOfFace[0] + dx, middleOfFace[1] + dy)\t\n\t\treturn middleOfFace"
]
| [
"0.6508476",
"0.63860726",
"0.636147",
"0.62651974",
"0.60823756",
"0.59323627",
"0.58962876",
"0.58249325",
"0.57726777",
"0.577182",
"0.5729244",
"0.57171816",
"0.56941193",
"0.5686298",
"0.5661377",
"0.5648338",
"0.56362337",
"0.56322366",
"0.5613614",
"0.56059855",
"0.5565936",
"0.55526257",
"0.5551913",
"0.55502164",
"0.5548627",
"0.5539856",
"0.55270153",
"0.5508103",
"0.5494543",
"0.54934883"
]
| 0.6622119 | 0 |
Distance traveled from the original position to an approximate position at a certain point in time | def traveled_distance(self, at_time: int) -> float:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ecliptic_position(self):\n vector = _ECLIPJ2000.dot(self.position.au)\n return Distance(vector)",
"def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))",
"def _calc_distance(self, checkpoint_loc):\n return N.sqrt((self.current_location[1] - checkpoint_loc[1])**2 \\\n + (self.current_location[0] - checkpoint_loc[0])**2)",
"def time_to_point(distance):\n if distance <= (125 / 9) ** 2:\n return distance ** .5\n return distance * 9 / 250 + 125 / 18",
"def _earth_distance(time='now'):\n return get_earth(time).radius",
"def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))",
"def _distance(self, new_pt):\n\t\tnew_pt = np.resize(new_point, (self.n_row, new_pt.shape[0]))\n\t\tdist = euclidean_distance(self.data[:,0:-1], new_pt)\n\n\t\treturn dist",
"def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance",
"def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance",
"def euclidean_distance(self):\n return sqrt(pow((self.goal_pose.x - self.ground_truth_pose.x), 2) +\n pow((self.goal_pose.y - self.ground_truth_pose.y), 2))",
"def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance",
"def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])",
"def estimated_distance(self, log=False):\n\t\t\n\t\tx0 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 0.0)\n\t\tx1 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 1.0)\n\n\t\t# Derivative at x=0 and x=1\n\t\tu = x0\n\t\tv = x1\n\t\t# Initial height at x=0\n\t\ty = abs(self.velocity)\n\n\t\tif log:\n\t\t\tprint(f'u: {u}, v: {v}, y: {y}\\nEstimated distance: {get_positive_area(u, v, y)}\\n')\n\t\t\n\t\treturn get_positive_area(u, v, y)",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def test_distance():\n t0 = time.time()\n c1 = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n c2 = coord.CelestialCoord(0.234 * coord.radians, -1.093 * coord.radians)\n c3 = coord.CelestialCoord((pi + 0.234) * coord.radians, -0.342 * coord.radians)\n c4 = coord.CelestialCoord((pi + 0.234) * coord.radians, 0.832 * coord.radians)\n c5 = coord.CelestialCoord(1.832 * coord.radians, -0.723 * coord.radians)\n c6 = coord.CelestialCoord((0.234 + 2.3e-9) * coord.radians, (0.342 + 1.2e-9) * coord.radians)\n t1 = time.time()\n\n a1 = astropy.coordinates.SkyCoord(0.234 * units.radian, 0.342 * units.radian)\n a2 = astropy.coordinates.SkyCoord(0.234 * units.radian, -1.093 * units.radian)\n a3 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, -0.342 * units.radian)\n a4 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, 0.832 * units.radian)\n a5 = astropy.coordinates.SkyCoord(1.832 * units.radian, -0.723 * units.radian)\n a6 = astropy.coordinates.SkyCoord(0.234 + 2.3e-9, 0.342 + 1.2e-9, unit=units.radian)\n t2 = time.time()\n\n coord_dist = [c1.distanceTo(c).rad for c in [c2,c3,c4,c5,c6]]\n t3 = time.time()\n astropy_dist = [a1.separation(a).rad for a in [a2,a3,a4,a5,a6]]\n t4 = time.time()\n\n np.testing.assert_almost_equal(coord_dist, astropy_dist, decimal=12)\n # For the last one, the distance is rather small in radians, so test in arcsec\n np.testing.assert_almost_equal(coord_dist[-1] * (coord.radians/coord.arcsec),\n astropy_dist[-1] * (coord.radians/coord.arcsec), decimal=10)\n\n print('Compare times for distance calculations:')\n print(' Make CelestialCoords: t = ',t1-t0)\n print(' Make SkyCoords: t = ',t2-t1)\n print(' Calculate distances with Coord: t = ',t3-t2)\n print(' Calculate distances with Astropy: t = ',t4-t3)",
"def _center_distance(self):\n # Split positions in segments of two points :\n cut = np.vsplit(self.a_position, int(self.a_position.shape[0]/2))\n # Get center position and starting line position :\n center = np.mean(cut, axis=1)\n\n # ============ EUCLIDIAN DISTANCE ============\n diff = np.sqrt(np.square(center[:, np.newaxis, :] - center).sum(2))\n diff[np.tril_indices_from(diff)] = np.inf\n\n return center, diff",
"def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)",
"def distance(self):\n return Distance(length_of(self.position.au))",
"def get_euclid_distance_to(self, atom):\n return linalg.norm(self.get_coords() - atom.get_coords())",
"def get_distance(self) -> int:\n return self.get_measurement_data().distance",
"def get_distance(self, pos, new_pos):\n cur = self.positions[pos].cord()\n new = self.positions[new_pos].cord()\n return distance.euclidean(cur, new)",
"def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance",
"def get_distance(self):\n print(\"voici la distance à l'obstacle\")",
"def closest_distance(self, time, other_object, other_time):\n ti = np.where(self.times == time)[0][0]\n oti = np.where(other_object.times == other_time)[0][0]\n xs = self.x[ti].ravel()[self.masks[ti].ravel() == 1]\n xs = xs.reshape(xs.size, 1)\n ys = self.y[ti].ravel()[self.masks[ti].ravel() == 1]\n ys = ys.reshape(ys.size, 1)\n o_xs = other_object.x[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_xs = o_xs.reshape(1, o_xs.size)\n o_ys = other_object.y[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_ys = o_ys.reshape(1, o_ys.size)\n distances = (xs - o_xs) ** 2 + (ys - o_ys) ** 2\n return np.sqrt(distances.min())",
"def distance(v, t):\n rf = v.orbit.body.non_rotating_reference_frame\n vec = v3minus(v.position(rf), t.position(rf))\n a = vec[0] * vec[0]\n b = vec[1] * vec[1]\n c = vec[2] * vec[2]\n return math.sqrt(a + b + c)",
"def get_distance(pose1, pose2):\n return math.sqrt((pose1.x-pose2.x)**2+(pose1.y-pose2.y)**2)",
"def calc_dist(self, p):\n p = np.array((p.x, p.y, p.z))\n return LA.norm(p - self.car_pos)",
"def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5"
]
| [
"0.6589855",
"0.64031625",
"0.63883924",
"0.6278429",
"0.6271049",
"0.6252697",
"0.62145644",
"0.6183875",
"0.61374414",
"0.6134414",
"0.609672",
"0.6093841",
"0.60573226",
"0.6027986",
"0.6027986",
"0.6016954",
"0.5998085",
"0.5989928",
"0.59836787",
"0.59779924",
"0.5970454",
"0.5961937",
"0.59525275",
"0.59363157",
"0.591587",
"0.5879393",
"0.58704907",
"0.5845701",
"0.5842012",
"0.5831533"
]
| 0.65310526 | 1 |
Return the LaTeX special characters and a corresponding error string | def forbidden_latex_chars():
tex_char = ['\\', '{', '}', '&', '[', ']', '^', '~']
chars = ', '.join(['"{char}"'.format(char=char) for char in tex_char])
message = _(u"Următoarele caractere sunt interzise și trebuie scoase : {chars}.".format(chars=chars))
return tex_char, message | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def escape_latex_characters(line):\n line = line.replace('\\\\', '\\\\textbackslash')\n line = line.replace('&', '\\&')\n line = line.replace('%', '\\%')\n line = line.replace('$', '\\$')\n line = line.replace('#', '\\#')\n line = line.replace('_', '\\_')\n line = line.replace('{', '\\{')\n line = line.replace('}', '\\}')\n line = line.replace('~', '\\\\textasciitilde')\n line = line.replace('^', '\\\\textasciicircum')\n line = line.replace('<', '\\\\textless')\n line = line.replace('>', '\\\\textgreater')\n return line",
"def escape_latex(s):\n return \"\".join(LATEX_CHARS.get(c, c) for c in s)",
"def _latex_(self):\n return \"\\\\textnormal{Extended code coming from %s}\" % self.original_code()",
"def validate_latex_free(string):\n tex_char, message = forbidden_latex_chars()\n for char in tex_char:\n if char in string:\n raise ValidationError(message)",
"def _latex_(self):\n return \"\\\\textnormal{Decoder of } %s \\\\textnormal{ through } %s\" % (self.code(), self.original_decoder())",
"def test_failing_rendering(self):\n with self.assertRaisesMessage(\n LatexConversionException, \"Couldn't compile LaTeX document\"\n ):\n render_latex_to_image(r\"invalid $ LaTeX\")",
"def test_syntax_errors(self):\r\n bad_math_list = [\r\n '11+',\r\n '11*',\r\n 'f((x)',\r\n 'sqrt(x^)',\r\n '3f(x)', # Not 3*f(x)\r\n '3|4',\r\n '3|||4'\r\n ]\r\n bad_exceptions = {}\r\n for math in bad_math_list:\r\n try:\r\n preview.latex_preview(math)\r\n except pyparsing.ParseException:\r\n pass # This is what we were expecting. (not excepting :P)\r\n except Exception as error: # pragma: no cover\r\n bad_exceptions[math] = error\r\n else: # pragma: no cover\r\n # If there is no exception thrown, this is a problem\r\n bad_exceptions[math] = None\r\n\r\n self.assertEquals({}, bad_exceptions)",
"def get_word_char_exceptions(self): # real signature unknown; restored from __doc__\n return \"\"",
"def escapeLaTeX(self,string):\n string_to_escape = \"{(&$#%)}\" # fixed the problem of producing \\[ \\] math environment\n new_str_list = map(lambda x: \"\\\\\" + x if x in string_to_escape else x,\n string)\n new_symbolfied_list = map(lambda x: symbols.unicode_to_latex_dict[x] if x in symbols.unicode_to_latex_dict else x, \n new_str_list)\n return ''.join(new_symbolfied_list)",
"def format_latex(self,str):\n\n # Characters that need to be escaped for latex:\n escape_re = re.compile(r'(%|_|\\$)',re.MULTILINE)\n # Magic command names as headers:\n cmd_name_re = re.compile(r'^(@.*?):',re.MULTILINE)\n # Magic commands \n cmd_re = re.compile(r'(?P<cmd>@.+?\\b)(?!\\}\\}:)',re.MULTILINE)\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n\n str = cmd_name_re.sub(r'\\n\\\\texttt{\\\\textbf{\\1}}:',str)\n str = cmd_re.sub(r'\\\\texttt{\\g<cmd>}',str)\n str = par_re.sub(r'\\\\\\\\',str)\n str = escape_re.sub(r'\\\\\\1',str)\n #file('/home/fperez/ipython/doc/magic.tex','w').write(str) # dbg\n return str",
"def clean_latex(tuple_entry):\n def _clean_latex(tuple_entry_string):\n processed = False\n for symbol in ['_', '*']:\n if symbol in tuple_entry_string:\n tuple_entry_string = tuple_entry_string.replace(symbol, '\\\\' + symbol)\n processed = True\n if processed:\n return '\\\\texttt{' + tuple_entry_string + '}'\n else:\n return tuple_entry_string\n\n return _clean_latex(str(tuple_entry))",
"def unicode2html(_unicrap):\n xlate = {u'\\u0022': '"',\nu'\\u0026': '&',\nu'\\u0027': ''',\nu'\\u003C': '<',\nu'\\u003E': '>',\nu'\\u00A0': ' ',\nu'\\u00A1': '¡',\nu'\\u00A2': '¢',\nu'\\u00A3': '£',\nu'\\u00A4': '¤',\nu'\\u00A5': '¥',\nu'\\u00A6': '¦',\nu'\\u00A7': '§',\nu'\\u00A8': '¨',\nu'\\u00A9': '©',\nu'\\u00AA': 'ª',\nu'\\u00AB': '«',\nu'\\u00AC': '¬',\nu'\\u00AD': '­',\nu'\\u00AE': '®',\nu'\\u00AF': '¯',\nu'\\u00B0': '°',\nu'\\u00B1': '±',\nu'\\u00B2': '²',\nu'\\u00B3': '³',\nu'\\u00B4': '´',\nu'\\u00B5': 'µ',\nu'\\u00B6': '¶',\nu'\\u00B7': '·',\nu'\\u00B8': '¸',\nu'\\u00B9': '¹',\nu'\\u00BA': 'º',\nu'\\u00BB': '»',\nu'\\u00BC': '¼',\nu'\\u00BD': '½',\nu'\\u00BE': '¾',\nu'\\u00BF': '¿',\nu'\\u00C0': 'À',\nu'\\u00C1': 'Á',\nu'\\u00C2': 'Â',\nu'\\u00C3': 'Ã',\nu'\\u00C4': 'Ä',\nu'\\u00C5': 'Å',\nu'\\u00C6': 'Æ',\nu'\\u00C7': 'Ç',\nu'\\u00C8': 'È',\nu'\\u00C9': 'É',\nu'\\u00CA': 'Ê',\nu'\\u00CB': 'Ë',\nu'\\u00CC': 'Ì',\nu'\\u00CD': 'Í',\nu'\\u00CE': 'Î',\nu'\\u00CF': 'Ï',\nu'\\u00D0': 'Ð',\nu'\\u00D1': 'Ñ',\nu'\\u00D2': 'Ò',\nu'\\u00D3': 'Ó',\nu'\\u00D4': 'Ô',\nu'\\u00D5': 'Õ',\nu'\\u00D6': 'Ö',\nu'\\u00D7': '×',\nu'\\u00D8': 'Ø',\nu'\\u00D9': 'Ù',\nu'\\u00DA': 'Ú',\nu'\\u00DB': 'Û',\nu'\\u00DC': 'Ü',\nu'\\u00DD': 'Ý',\nu'\\u00DE': 'Þ',\nu'\\u00DF': 'ß',\nu'\\u00E0': 'à',\nu'\\u00E1': 'á',\nu'\\u00E2': 'â',\nu'\\u00E3': 'ã',\nu'\\u00E4': 'ä',\nu'\\u00E5': 'å',\nu'\\u00E6': 'æ',\nu'\\u00E7': 'ç',\nu'\\u00E8': 'è',\nu'\\u00E9': 'é',\nu'\\u00EA': 'ê',\nu'\\u00EB': 'ë',\nu'\\u00EC': 'ì',\nu'\\u00ED': 'í',\nu'\\u00EE': 'î',\nu'\\u00EF': 'ï',\nu'\\u00F0': 'ð',\nu'\\u00F1': 'ñ',\nu'\\u00F2': 'ò',\nu'\\u00F3': 'ó',\nu'\\u00F4': 'ô',\nu'\\u00F5': 'õ',\nu'\\u00F6': 'ö',\nu'\\u00F7': '÷',\nu'\\u00F8': 'ø',\nu'\\u00F9': 'ù',\nu'\\u00FA': 'ú',\nu'\\u00FB': 'û',\nu'\\u00FC': 'ü',\nu'\\u00FD': 'ý',\nu'\\u00FE': 'þ',\nu'\\u00FF': 'ÿ',\nu'\\u0152': 'Œ',\nu'\\u0153': 'œ',\nu'\\u0160': 'Š',\nu'\\u0161': 'š',\nu'\\u0178': 'Ÿ',\nu'\\u0192': 'ƒ',\nu'\\u02C6': 'ˆ',\nu'\\u02DC': '˜',\nu'\\u0391': 'Α',\nu'\\u0392': 'Β',\nu'\\u0393': 'Γ',\nu'\\u0394': 'Δ',\nu'\\u0395': 'Ε',\nu'\\u0396': 'Ζ',\nu'\\u0397': 'Η',\nu'\\u0398': 'Θ',\nu'\\u0399': 'Ι',\nu'\\u039A': 'Κ',\nu'\\u039B': 'Λ',\nu'\\u039C': 'Μ',\nu'\\u039D': 'Ν',\nu'\\u039E': 'Ξ',\nu'\\u039F': 'Ο',\nu'\\u03A0': 'Π',\nu'\\u03A1': 'Ρ',\nu'\\u03A3': 'Σ',\nu'\\u03A4': 'Τ',\nu'\\u03A5': 'Υ',\nu'\\u03A6': 'Φ',\nu'\\u03A7': 'Χ',\nu'\\u03A8': 'Ψ',\nu'\\u03A9': 'Ω',\nu'\\u03B1': 'α',\nu'\\u03B2': 'β',\nu'\\u03B3': 'γ',\nu'\\u03B4': 'δ',\nu'\\u03B5': 'ε',\nu'\\u03B6': 'ζ',\nu'\\u03B7': 'η',\nu'\\u03B8': 'θ',\nu'\\u03B9': 'ι',\nu'\\u03BA': 'κ',\nu'\\u03BB': 'λ',\nu'\\u03BC': 'μ',\nu'\\u03BD': 'ν',\nu'\\u03BE': 'ξ',\nu'\\u03BF': 'ο',\nu'\\u03C0': 'π',\nu'\\u03C1': 'ρ',\nu'\\u03C2': 'ς',\nu'\\u03C3': 'σ',\nu'\\u03C4': 'τ',\nu'\\u03C5': 'υ',\nu'\\u03C6': 'φ',\nu'\\u03C7': 'χ',\nu'\\u03C8': 'ψ',\nu'\\u03C9': 'ω',\nu'\\u03D1': 'ϑ',\nu'\\u03D2': 'ϒ',\nu'\\u03D6': 'ϖ',\nu'\\u2002': ' ',\nu'\\u2003': ' ',\nu'\\u2009': ' ',\nu'\\u200C': '‌',\nu'\\u200D': '‍',\nu'\\u200E': '‎',\nu'\\u200F': '‏',\nu'\\u2013': '–',\nu'\\u2014': '—',\nu'\\u2018': '‘',\nu'\\u2019': '’',\nu'\\u201A': '‚',\nu'\\u201C': '“',\nu'\\u201D': '”',\nu'\\u201E': '„',\nu'\\u2020': '†',\nu'\\u2021': '‡',\nu'\\u2022': '•',\nu'\\u2026': '…',\nu'\\u2030': '‰',\nu'\\u2032': '′',\nu'\\u2033': '″',\nu'\\u2039': '‹',\nu'\\u203A': '›',\nu'\\u203E': '‾',\nu'\\u2044': '⁄',\nu'\\u20AC': '€',\nu'\\u2111': 'ℑ',\nu'\\u2118': '℘',\nu'\\u211C': 'ℜ',\nu'\\u2122': '™',\nu'\\u2135': 'ℵ',\nu'\\u2190': '←',\nu'\\u2191': 
'↑',\nu'\\u2192': '→',\nu'\\u2193': '↓',\nu'\\u2194': '↔',\nu'\\u21B5': '↵',\nu'\\u21D0': '⇐',\nu'\\u21D1': '⇑',\nu'\\u21D2': '⇒',\nu'\\u21D3': '⇓',\nu'\\u21D4': '⇔',\nu'\\u2200': '∀',\nu'\\u2202': '∂',\nu'\\u2203': '∃',\nu'\\u2205': '∅',\nu'\\u2207': '∇',\nu'\\u2208': '∈',\nu'\\u2209': '∉',\nu'\\u220B': '∋',\nu'\\u220F': '∏',\nu'\\u2211': '∑',\nu'\\u2212': '−',\nu'\\u2217': '∗',\nu'\\u221A': '√',\nu'\\u221D': '∝',\nu'\\u221E': '∞',\nu'\\u2220': '∠',\nu'\\u2227': '∧',\nu'\\u2228': '∨',\nu'\\u2229': '∩',\nu'\\u222A': '∪',\nu'\\u222B': '∫',\nu'\\u2234': '∴',\nu'\\u223C': '∼',\nu'\\u2245': '≅',\nu'\\u2248': '≈',\nu'\\u2260': '≠',\nu'\\u2261': '≡',\nu'\\u2264': '≤',\nu'\\u2265': '≥',\nu'\\u2282': '⊂',\nu'\\u2283': '⊃',\nu'\\u2284': '⊄',\nu'\\u2286': '⊆',\nu'\\u2287': '⊇',\nu'\\u2295': '⊕',\nu'\\u2297': '⊗',\nu'\\u22A5': '⊥',\nu'\\u22C5': '⋅',\nu'\\u2308': '⌈',\nu'\\u2309': '⌉',\nu'\\u230A': '⌊',\nu'\\u230B': '⌋',\nu'\\u27E8': '⟨',\nu'\\u27E9': '⟩',\nu'\\u25CA': '◊',\nu'\\u2660': '♠',\nu'\\u2663': '♣',\nu'\\u2665': '♥',\nu'\\u2666': '♦'}\n\n strOut = \"\"\n if _unicrap is not None:\n for i in _unicrap:\n if i in xlate:\n strOut += xlate[i]\n else:\n strOut += str(i)\n return strOut",
"def _latex_(self):\n return \"\\\\textnormal{Extended matrix-based encoder for }%s\" % self.code()._latex_()",
"def sanitizeTex(texstring):\n\n newstring = (\n texstring.replace(r\"\\\\%\", r\"\\%\")\n .replace(r\"\\\\\", r\"\\tabularnewline\")\n .replace(\"\\$\", \"$\")\n .replace(\"\\_\", \"_\")\n .replace(\"ug/L\", \"\\si[per-mode=symbol]{\\micro\\gram\\per\\liter}\")\n .replace(r\"\\textbackslashtimes\", r\"\\times\")\n .replace(r\"\\textbackslash\", \"\")\n .replace(r\"\\textasciicircum\", r\"^\")\n .replace(\"\\{\", \"{\")\n .replace(\"\\}\", \"}\")\n )\n return newstring",
"def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)",
"def html_replace(exc):\n if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):\n # pylint: disable=invalid-name\n s = ['&%s;' % entities.codepoint2name[ord(c)] for c in exc.object[exc.start:exc.end]]\n return ''.join(s), exc.end\n raise TypeError(\"Can't handle exception %s\" % exc.__name__)",
"def check_special_chars(self,node,batch_tsvs): # probably need to add more types of special chars to this\n errors = []\n filename = batch_tsvs[\"node_tsvs\"][node]\n with open(filename, \"rb\") as tsv_file:\n lns = tsv_file.readlines()\n count = 0\n for ln in lns:\n count+=1\n if b\"\\xe2\" in ln:\n error = \"{} TSV has special char in line {}: {}\".format(node,count,ln)\n print(error)\n errors.append(error)\n return errors",
"def custom_latex_processing(latex):\n if latex is None:\n raise ValueError(\"Latex is null\")\n # this weird modification is only needed when jenkins run a unit test in\n # pyquickhelper (pycode)\n return latex",
"def errors_icons(self):\n msg_errors_lifes = ''\n for i in range(0,5):\n if self.letters_wrong <= i:\n msg_errors_lifes += ' ♥ '\n else:\n msg_errors_lifes += ' ☠ ' \n return msg_errors_lifes",
"def label_to_latex(text):\n if text is None:\n return ''\n for ch in LATEX_CONTROL_CHARS:\n text = text.replace(ch, \"\\\\%s\" % ch)\n return text",
"def latex(expression):\n return XML('<img src=\"http://chart.apis.google.com/chart?cht=tx&chl=%s\" align=\"center\"/>' % expression.replace('\"','\\\"'))",
"def error(self,text):\n if type(text) in (bytes, str):\n T = text\n else:\n # list probably:\n T = '\\n'.join(text)\n print(('-'*60))\n print(T)\n print(('='*60))\n return T",
"def html_error(string):\n return html_div(string, \"error\")",
"def formatError(self,error):\n return '<font color=\"#f00\"><b><i>%s</i></b></font><br />\\n' % error",
"def escape_tex(value):\n newval = value\n for pattern, replacement in LATEX_SUBS:\n newval = pattern.sub(replacement, newval)\n return newval",
"def key_error_message(self,key):\n if not key:\n return 'key is blank.'\n elif any(map(lambda s: s in key,space_chars)):\n return '\"{}\" contains whitespace.'.format(key)\n elif any(map(lambda s: s in key,bad_chars)):\n return '\"{}\" contains special characters.'.format(key)",
"def display_errors(self):\r\n\r\n def format_name(field_name):\r\n \"\"\"Formats field names for error display\"\"\"\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()\r\n\r\n msg = \"Please correct the following errors:\\n\"\r\n msg += \"\\n\".join(\r\n \"%s: {r%s{n\" % (format_name(field), \", \".join(errs))\r\n for field, errs in self.errors.items()\r\n )\r\n return msg",
"def escape(s, format=HTML):\r\n #Note: If you have to make sure that every character gets replaced\r\n # only once (and if you cannot achieve this with the following code),\r\n # use something like u\"\".join([replacedict.get(c,c) for c in s])\r\n # which is about 2-3 times slower (but maybe needs less memory).\r\n #Note: This is one of the most time-consuming parts of the template.\r\n # So maybe speed this up.\r\n\r\n if format is None or format == NONE:\r\n pass\r\n elif format == HTML:\r\n s = s.replace(u\"&\", u\"&\") # must be done first!\r\n s = s.replace(u\"<\", u\"<\")\r\n s = s.replace(u\">\", u\">\")\r\n s = s.replace(u'\"', u\""\")\r\n s = s.replace(u\"'\", u\"'\")\r\n elif format == LATEX:\r\n #TODO: which are the \"reserved\" characters for LaTeX?\r\n # are there more than these?\r\n s = s.replace(\"\\\\\", u\"\\\\backslash{}\") #must be done first!\r\n s = s.replace(\"#\", u\"\\\\#\")\r\n s = s.replace(\"$\", u\"\\\\$\")\r\n s = s.replace(\"%\", u\"\\\\%\")\r\n s = s.replace(\"&\", u\"\\\\&\")\r\n s = s.replace(\"_\", u\"\\\\_\")\r\n s = s.replace(\"{\", u\"\\\\{\")\r\n s = s.replace(\"}\", u\"\\\\}\")\r\n else:\r\n raise ValueError('Invalid format (only None, HTML and LATEX are supported).')\r\n return unicode(s)",
"def error():\n return render_template(\"error.html\", **locals())",
"def escape_tex(value):\n # This code, and the code that call this is courtesy of Clemens Kaposi\n # http://flask.pocoo.org/snippets/55/\n\n LATEX_SUBS = (\n (re.compile(r'\\\\'), r'\\\\textbackslash'),\n (re.compile(r'([{}_#%&$])'), r'\\\\\\1'),\n (re.compile(r'~'), r'\\~{}'),\n (re.compile(r'\\^'), r'\\^{}'),\n (re.compile(r'\"'), r\"''\"),\n (re.compile(r'\\.\\.\\.+'), r'\\\\ldots'),\n )\n\n newval = value\n for pattern, replacement in LATEX_SUBS:\n newval = pattern.sub(replacement, newval)\n return newval"
]
| [
"0.62524474",
"0.6218479",
"0.61760634",
"0.6106915",
"0.60790557",
"0.59577155",
"0.5891351",
"0.58853215",
"0.58652323",
"0.5775055",
"0.5732504",
"0.5689707",
"0.5683134",
"0.5635726",
"0.56353235",
"0.5581285",
"0.55806285",
"0.55648124",
"0.5557515",
"0.5553885",
"0.5547227",
"0.5537667",
"0.55131567",
"0.55101347",
"0.54885143",
"0.54737324",
"0.5457876",
"0.5405466",
"0.54019475",
"0.5379363"
]
| 0.75179136 | 0 |
Function used to get player and team data for games between the start and end date range | def get_player_team_data(self, start_date, end_date = None,
get_player_data_ind = True, get_team_data_ind = True,
pre_player_data_dir = None, pre_team_data_dir = None):
#Converts start and end date from string to datetime
start_date = datetime.strptime(start_date, '%Y-%m-%d').date()
if end_date:
end_date = datetime.strptime(end_date, '%Y-%m-%d').date()
else:
end_date = start_date
if pre_player_data_dir:
try:
#Reads in the existing player dataset to append the scraped data to
exist_player_data = pd.read_csv(pre_player_data_dir)
except:
raise Exception('Cannot read in existing player dataset please ensure the directory is correct')
if pre_team_data_dir:
try:
#Reads in the existing player dataset to append the scraped data to
exist_team_data = pd.read_csv(pre_team_data_dir)
except:
raise Exception('Cannot read in existing team dataset please ensure the directory is correct')
delta = end_date - start_date
#Appends list of date between start and end date to strings
date_list = []
for i in range(delta.days + 1):
day = start_date + timedelta(days=i)
date_list.append(str(day))
for date in date_list:
print(f'Now scraping data from NBA games on {date}')
home_team_list = get_list_of_hometeams(self.driver, date)
if len(home_team_list) > 0:
counter = 1
for home_team in home_team_list:
if counter == 1:
if get_player_data_ind:
player_df_full = get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'],
date_played = date,
driver = self.driver)
if get_team_data_ind:
team_df_full = get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'],
date_played = date,
driver = self.driver)
else:
if get_player_data_ind:
player_df_full = player_df_full.append(get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'],
date_played = date,
driver = self.driver), ignore_index=True)
if get_team_data_ind:
team_df_full = team_df_full.append(get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'],
date_played = date,
driver = self.driver), ignore_index=True)
counter+=1
if pre_player_data_dir:
exist_player_data = exist_player_data.append(player_df_full)
exist_player_data.to_csv(pre_player_data_dir, index = False)
print(f'Updated player dataset will be overwritten in {pre_player_data_dir}')
if pre_team_data_dir:
exist_team_data = exist_team_data.append(team_df_full)
exist_team_data.to_csv(pre_team_data_dir, index = False)
print(f'Updated team dataset will be overwritten in {pre_team_data_dir}')
if pre_player_data_dir and pre_team_data_dir:
return exist_player_data, exist_team_data
elif pre_player_data_dir:
return exist_player_data
elif pre_team_data_dir:
return exist_team_data
elif get_player_data_ind and get_team_data_ind:
return player_df_full, team_df_full
elif get_player_data_ind:
return player_df_full
elif get_team_data_ind:
return team_df_full | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_player_data(\n start_date: str = f\"{EARLIEST_SEASON_WITH_EXTENSIVE_PLAYER_STATS}-01-01\",\n end_date: str = str(date.today()),\n verbose: int = 1,\n) -> List[Dict[str, Any]]:\n if verbose == 1:\n print(\n f\"Fetching player data from between {start_date} and {end_date} \"\n \"in yearly baches...\"\n )\n\n data_batch_date_ranges = _player_batch_date_ranges(start_date, end_date)\n partial_fetch_player_stats_batch = partial(\n _fetch_player_stats_batch, verbose=verbose\n )\n\n # Google Cloud Run cannot handle such a large data set in its response, so we\n # fetch it in batches. With the implementation of kedro pipelines, we should\n # usually read historical data from files or Google Cloud Storage, so the slowness\n # of this isn't much of an issue.\n data = itertools.chain.from_iterable(\n [\n partial_fetch_player_stats_batch(*date_pair)\n for date_pair in data_batch_date_ranges\n ]\n )\n\n if verbose == 1:\n print(\"All player data received!\")\n\n return list(data)",
"def get_events(start_date, end_date, source=utils.get_native_source, **kwargs):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting events from source %s...\", source)\n if not source:\n return []\n # with open('sportmonks/response_texts/fixtures_{}-{}.txt'.format(start_date.strftime('%Y-%m-%d'),\n # end_date.strftime('%Y-%m-%d')), 'w') as outfile:\n # season is necessary so that the season object is extracted and used\n include = kwargs.get('include', '')\n include = ','.join([include, 'season']) if include else 'season'\n kwargs['include'] = include\n data, meta, status_code = sportmonks.fixtures.by_date_range(start_date=start_date, end_date=end_date, **kwargs)\n # json.dump(data, outfile, indent=4)\n if not data:\n return []\n pre_events = []\n try:\n num_fetched_objects = len(data)\n except:\n num_fetched_objects = None\n num_processed_objects = 0\n try:\n for obj in data:\n num_processed_objects += 1\n try:\n sid = obj.get('id', None)\n time = obj.get('time', dict())\n starting_at = time.get('starting_at', dict())\n event_datetime = get_date(starting_at, 'date_time')\n # custom_timezone = pytz.timezone('Europe/Athens')\n # event_datetime = event_datetime.astimezone(custom_timezone)\n home_team_sid = obj.get('localteam_id', None)\n away_team_sid = obj.get('visitorteam_id', None)\n competition_season_sid = obj.get('season_id', None)\n season_string = obj.get('season', {}).get('data', {}).get('name')\n stage_sid = obj.get('stage_id', None)\n round_sid = obj.get('round_id', None)\n competition_sid = obj.get('league_id', None)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n zak_season_name = games.models.Season.zakandify_season_string(season_string)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n if not season:\n logger.data_error('Could not extract season object from season string: %s', season_string)\n continue\n\n # todo sportmonks fix\n # if the event involves a problematic team it is not created in order to avoid future problems\n if is_in_problematic_teams(home_team_sid):\n home_team_sid = None\n if is_in_problematic_teams(away_team_sid):\n away_team_sid = None\n\n competition_seasons = games.models.CompetitionSeason.by_sid(competition_season_sid, source, season)\n try:\n competition_season = competition_seasons.first() # only one entity exists in the queryset\n except Exception as e:\n logger.warning('%s', e)\n competition_season = None\n\n home_team = games.models.Team.by_sid(home_team_sid, source)\n away_team = games.models.Team.by_sid(away_team_sid, source)\n pre_event = pre_models.PreEvent(source, sid, event_datetime, home_team, away_team, competition_season)\n pre_events.append(pre_event)\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.fixtures.by_date_range %s %s from source %s',\n e, start_date, end_date, source)\n logger.info(\"%s event objects were contained in the response\", num_fetched_objects)\n logger.info(\"%s event objects were processed\", num_processed_objects)\n logger.info(\"%s pre events were created\", len(pre_events))\n return pre_events",
"def get_player_stats_from_game(team, year, week):",
"def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = 
scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)",
"def available_players_query():\n\t#start the calculation timer\n\tcalc_start = time.time()\n\n\t#initialize everything\n\tlast_first_names = []\n\tfull_names = []\n\tplayer_key = []\n\tplayer_pos = []\n\tstart = 1\n\tdone = False\n\n\t#this is where the data is actually created\n\t#loop thru to get all of the players available\n\twhile(not done):\n\t\tquery_url = base_query_url + 'league/' + leagueID + '/players;status=A;sort=PTS;start=%s;count=25' %start\n\t\t\n\t\tr = s.get(query_url, params={'format': 'json'})\n\t\toutput = r.json()\n\t\toutput = output['fantasy_content']\n\t\toutput = output['league']\n\t\toutput = output[1]\n\t\toutput = output['players']\n\t\tcount = output['count']\n\t\tplayer_num = list(output.keys())\n\t\tplayer_num = player_num[0:len(player_num)-1]\n\t\t#grab the names for each of the players in this batch of players\n\t\tfor i in player_num:\n\t\t\t#get to player details\n\t\t\toutput1 = output[i]\n\t\t\toutput1 = output1['player']\n\t\t\toutput1 = output1[0]\n\t\t\t#get player name\n\t\t\toutput_name = output1[2]\n\t\t\toutput_name = output_name['name']\n\t\t\tfirst = output_name['first']\n\t\t\tlast = output_name['last']\n\t\t\tfull = output_name['full']\n\t\t\tlast_first = last + ', ' + first\n\t\t\t#get player key\n\t\t\toutput_key = list(output1[0].values())[0]\n\t\t\t#get player position\n\t\t\toutput_pos = list(output1[9].values())[0]\n #add items to lists\n\t\t\tlast_first_names.append(last_first)\n\t\t\tfull_names.append(full)\n\t\t\tplayer_key.append(output_key)\n\t\t\tplayer_pos.append(output_pos)\n\t\t\n\t\t#stopping rule: if the number of players on the page is less than 25, then stop\n\t\tstart += 25\n\t\tif count < 25:\n\t\t\tdone = True\n\n\t#stop the timer\n\tcalc_end = time.time()\n\t#print the calculation time\n\tprint('Process complete')\n\tprint('Calculation time for all available players: {0:0.2f} seconds'.format((calc_end-calc_start)))\n\t#return the players name and player key lists\n\treturn full_names, player_key, player_pos",
"def get_player_games(self, year, use_local=True):",
"def get_games(self, start_game_id, end_game_id):\n games = []\n \n num_games = end_game_id - start_game_id + 1\n \n for game_id in range(start_game_id, end_game_id + 1):\n try:\n game = self.get_game(game_id)\n games.append(game)\n except:\n print ('game_id =', game_id, 'failed')\n \n time.sleep(0.4)\n \n update_progress(game_id - start_game_id + 1, num_games)\n \n return games",
"def get_player_data(self, player, season, mtgs=None, past=None, future=None, single=False):\n\n avail = []\n scheduled = []\n\n # Should be empty arrays if None\n if past is None:\n past = []\n if future is None:\n future = []\n\n nplayed = Schedule.objects.filter(meeting__in=past, player=player).count()\n nscheduled = Schedule.objects.filter(meeting__in=future, player=player).count()\n\n av = PlayerAvailability.objects.get_for_season_player(player, season)\n\n p = {\n 'name': player.first + ' ' + player.last,\n 'id': player.id,\n 'isavail': av.available,\n 'scheduled': av.scheduled,\n 'played': av.played,\n 'nplayed': nplayed,\n 'nscheduled': nscheduled + nplayed,\n 'single': single\n }\n\n return p",
"def get_games(season, date):\n url = \"http://live.nhl.com/GameData/SeasonSchedule-\" + season + \".json\"\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n games = []\n for game in data:\n if game[\"est\"][:8] == date:\n games.append(game)\n return games",
"def scrape(self):\n self._validate_date_range(self.start_date, self.end_date)\n self._validate_team()\n self._cache_source()\n soup = self.season_raw_cache[self.start_date.year]\n df = self._parse_raw(soup)\n return self._apply_filters(df)",
"def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]",
"def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = 
game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) + player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str",
"def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players",
"def get_afltables_stats(\n self,\n start_date: Optional[str] = \"1965-01-01\",\n end_date: Optional[str] = \"2016-12-31\",\n ) -> pd.DataFrame:\n\n return self.__data(\n f'get_afltables_stats(start_date = \"{start_date}\", '\n f'end_date = \"{end_date}\")'\n ).assign(playing_for=self.__translate_team_column(\"playing_for\"))",
"def team_players_query():\n #start the calculation timer\n calc_start = time.time()\n\n #initialize everything\n last_first_names = []\n full_names = []\n player_key = []\n player_pos = []\n \n #build the query URL\n query_url = base_query_url + 'team/' + leagueID + teamID + '/roster'\n\n #get the json data\n r = s.get(query_url, params={'format': 'json'})\n output = r.json()\n output = output['fantasy_content']['team'][1]['roster']['0']['players']\n player_num = list(output.keys())\n player_num = player_num[0:len(player_num)-1]\n #loop thru all of the players and extract the necessary info\n for i in player_num:\n result = output[i]\n result = result['player'][0]\n #store the player key\n player_k = result[0]['player_key']\n #store the player position\n pos = result[9]['display_position']\n #store player names\n output_name = result[2]['name']\n f_name = output_name['first']\n l_name = output_name['last']\n full = output_name['full']\n #build formatted name\n last_first = l_name + ', ' + f_name\n #add to lists\n full_names.append(full)\n last_first_names.append(last_first)\n player_key.append(player_k)\n player_pos.append(pos)\n \n #stop the timer\n calc_end = time.time()\n #print the calculation time\n print('Process complete')\n print('Calculation time for rostered players: {0:0.2f} seconds'.format((calc_end-calc_start)))\n #return full names and player keys\n return full_names, player_key, player_pos",
"def all_match_data(year):\n year_match_data = []\n match_year_data = pm()\n for count in range(len(match_year_data)):\n if year == match_year_data[count][1]:\n year_match_data.append(match_year_data[count])\n for count in range(len(year_match_data)):\n print(\n f'Game ID: {count + 1} Match date: {year_match_data[count][3]} {year_match_data[count][4]} vs '\n f'{year_match_data[count][5]}')\n\n return year_match_data",
"def get_user_plays(username, mindate: datetime = None, maxdate: datetime = None):\n req_text = f\"https://www.boardgamegeek.com/xmlapi2/plays?username={username}\"\n\n if mindate is not None:\n if (maxdate is not None and maxdate < mindate) or mindate > datetime.now():\n raise ValueError(\"Must be a valid date range\")\n req_text += f\"&mindate={bgg_date_format(mindate)}\"\n\n if maxdate is not None:\n req_text += f\"&maxdate={bgg_date_format(maxdate)}\"\n\n result = requests.get(req_text)\n\n return result.text",
"def player_stats_query(week, player_list, session=s): \n #initialize lists\n pos_list = []\n team_list = []\n \n #cycle thru each player that is currently available\n for player in avail_player_key:\n #build the API url for the unique player key\n url_player = base_query_url+'league/'+leagueID+'/players;player_keys='+player+'/stats;type=week;week='+str(week)\n #convert API call to json\n raw = s.get(url_player, params={'format': 'json'}).json()\n #parse out the players details info (e.g. position, owned, etc.)\n player_details = raw['fantasy_content']['league'][1]['players']['0']['player'][0]\n #parse out position from player details\n pos = player_details[9]['display_position'].upper()\n \n ## FILTER OUT NON-OFFENSE POSITIONS\n if pos not in ['QB', 'WR', 'RB', 'TE']:\n continue\n else:\n \n #parse out team from player_details\n team = player_details[6]['editorial_team_abbr'].upper()\n #append data to lists\n pos_list.append(pos)\n team_list.append(team)\n \n #initialize a stats list\n stats_list = []\n #parse out the player stats\n player_stats = raw['fantasy_content']['league'][1]['players']['0']['player'][1]['player_stats']['stats']\n #loop thru all of the various stats\n for stat in player_stats:\n stat_dict = stat['stat']\n stats_list.append(stat_dict)\n \n return stats_list",
"def generate_tracking_game_logs(\n measure_type: TrackingMeasureType,\n player_or_team: PlayerOrTeam,\n date_from: date,\n date_to: date,\n **kwargs,\n) -> List[Any]:\n team_id_game_id_map = kwargs.get(\"team_id_game_id_map\")\n team_id_opponent_team_id_map = kwargs.get(\"team_id_opponent_team_id_map\")\n player_id_team_id_map = kwargs.get(\"player_id_team_id_map\")\n get_player_id_team_id_map = player_id_team_id_map is None\n get_team_id_maps = (\n team_id_game_id_map is None or team_id_opponent_team_id_map is None\n )\n game_logs = []\n for dt in rrule(DAILY, dtstart=date_from, until=date_to):\n if get_team_id_maps:\n (\n team_id_game_id_map,\n team_id_opponent_team_id_map,\n ) = helpers.get_team_id_maps_for_date(dt)\n if len(team_id_game_id_map.values()) != 0:\n if get_player_id_team_id_map:\n player_id_team_id_map = helpers.get_player_team_map_for_date(dt)\n date_game_id = list(team_id_game_id_map.values())[0]\n\n season = helpers.get_season_from_game_id(date_game_id)\n season_type = helpers.get_season_type_from_game_id(date_game_id)\n\n tracking_game_logs = get_tracking_stats(\n measure_type,\n [season],\n [season_type],\n player_or_team,\n # User per game here because it gives results to more decimal places\n PerMode=PerMode.per_game, # camel case to match request param key\n DateFrom=dt.strftime(\"%m/%d/%Y\"),\n DateTo=dt.strftime(\"%m/%d/%Y\"),\n )\n if player_or_team == PlayerOrTeam.player:\n # need to add team id for player because results only have last team id,\n # which may not be the team for which they played the game\n for game_log in tracking_game_logs:\n game_log.team_id = player_id_team_id_map[game_log.player_id]\n for game_log in tracking_game_logs:\n game_log.game_id = team_id_game_id_map[game_log.team_id]\n game_log.opponent_team_id = team_id_opponent_team_id_map[\n game_log.team_id\n ]\n game_logs += tracking_game_logs\n return game_logs",
"def get_all_player_history(understat_path, season): \n\n start_date, end_date = set_season_time(season)\n players = write_league_players(understat_path, season) # get all league players\n for i in range(len(players)):\n loop = asyncio.get_event_loop() \n result = loop.run_until_complete(get_player_history(int(players.loc[i][0])))\n name = players.loc[i][1]\n individuals = pd.DataFrame.from_dict(result)\n individuals['date'] = pd.to_datetime(individuals['date'])\n individuals = individuals[(individuals.date >= start_date)]\n individuals = individuals[(individuals.date <= end_date)]\n individuals['player_name'] = name\n individuals.to_csv(understat_path + \"{}_data.csv\".format(name), index = False) \n if i == 0:\n all_players = individuals\n else:\n all_players = all_players.append(individuals)\n all_players.to_csv(understat_path + 'all_understat_players.csv', index = False)",
"def extract_games(self) -> Dict[int, Dict[str, Any]]:\n optadocument = self._get_doc()\n attr = assertget(optadocument, '@attributes')\n matchdata = assertget(optadocument, 'MatchData')\n matches = {}\n for match in matchdata:\n matchattr = assertget(match, '@attributes')\n matchinfo = assertget(match, 'MatchInfo')\n matchinfoattr = assertget(matchinfo, '@attributes')\n game_id = int(assertget(matchattr, 'uID')[1:])\n matches[game_id] = dict(\n # Fields required by the base schema\n game_id=game_id,\n competition_id=int(assertget(attr, 'competition_id')),\n season_id=int(assertget(attr, 'season_id')),\n game_day=int(assertget(matchinfoattr, 'MatchDay')),\n game_date=datetime.strptime(assertget(matchinfo, 'Date'), '%Y-%m-%d %H:%M:%S'),\n # home_team_id=see below,\n # away_team_id=see below,\n # Optional fields\n # home_score=see below,\n # away_score=see below,\n # duration=?\n # referee=?\n # venue=?,\n # attendance=?\n # home_manager=?\n # away_manager=?\n )\n teamdata = assertget(match, 'TeamData')\n for team in teamdata:\n teamattr = assertget(team, '@attributes')\n side = assertget(teamattr, 'Side')\n teamid = assertget(teamattr, 'TeamRef')\n score = assertget(teamattr, 'Score')\n if side == 'Home':\n matches[game_id]['home_team_id'] = int(teamid[1:])\n matches[game_id]['home_score'] = int(score)\n else:\n matches[game_id]['away_team_id'] = int(teamid[1:])\n matches[game_id]['away_score'] = int(score)\n return matches",
"def games(self, competition_id: int, season_id: int) -> DataFrame[Any]:",
"def get_game(self, game_id):\n \n session = requests.session()\n response = session.get(self.baseURL + str(game_id), headers=self.headers)\n soup = BeautifulSoup(response.text)\n \n #get teams\n defeated_by = False \n game_header = soup.find_all(text=re.compile('defeats'))\n \n if len(game_header) == 0:\n game_header = soup.find_all(text=re.compile('defeated by'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('defeat'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('drew'))\n defeated_by = True \n else:\n defeated_by = True \n\n if defeated_by: \n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[3]\n else:\n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[2]\n \n date_string = game_header[0].split(' ')\n date_string_find = [date.lower() for date in date_string]\n \n venue = date_string[date_string_find.index('at') + 1]\n \n #get round\n round_num = None\n \n try:\n date_string_find.remove('')\n except:\n pass\n \n try:\n round_num = int(date_string[date_string_find.index('round') + 1])\n except:\n try:\n round_num = date_string_find[date_string_find.index('final') - 1] + ' final'\n except:\n round_num = date_string_find[date_string_find.index('semi-final')]\n \n date = date_string[-3:]\n date = ' '.join(date) \n date = parser.parse(date)\n \n #get attendance\n attend = soup.find_all(text=re.compile('Attendance'))\n attendance = 0\n \n if (len(attend) > 3):\n attendance = int(attend[1].split(' ')[-1])\n \n #get stats \n away_stats = {}\n home_stats = {}\n \n for stat in stats:\n stat_row = soup.find_all('td', text=stat)[0].find_parent('tr')\n elements = stat_row.find_all('td')\n \n if elements[0].text == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = elements[0].text\n \n if elements[0].text == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = elements[2].text\n \n return Game(game_id, home_team, away_team, venue, round_num, date, attendance, home_stats, away_stats)",
"def get_ride_report(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Report).filter(Report.date_created>=startDate, Report.date_created<=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200",
"def get_teams():",
"def get_past_matches_data(team):\n matches = team.get_past_matches()\n match_list = []\n for match in matches:\n match_dict = {}\n match_dict['match_date'] = match.match_date\n match_dict['match_name'] = match.__str__()\n match_dict['id'] = match.id\n innings = match.get_innings()\n if len(innings):\n if innings[0].runs > innings[1].runs:\n match_dict['winner_team'] = innings[0].bat_team\n match_dict['win_margin'] = innings[0].runs - innings[1].runs\n match_dict['win_type'] = 'Runs'\n match_dict['winner_score'] = str(innings[0].runs) + '/' + str(innings[0].wickets)\n else:\n match_dict['winner_team'] = innings[1].bat_team\n match_dict['win_margin'] = 10 - innings[1].wickets\n match_dict['win_type'] = 'Wickets'\n match_dict['winner_score'] = str(innings[1].runs) + '/' + str(innings[1].wickets)\n match_list.append(match_dict)\n return match_list",
"def date_range(start, end):\n \"\"\"between the start and end date inclusive.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure dates are in range of available data\n if (start > final_date) or (start < first_date) or (end > final_date) or (end < first_date) or (start>end):\n return f\"{start} - {end} is not a proper date range.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= end:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)",
"def get_player_data(self, playername=None):\r\n session = requests.session()\r\n url_comuniazo = 'http://www.comuniazo.com'\r\n user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:35.0) Gecko/20100101 Firefox/35.0'\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': url_comuniazo,\r\n \"User-Agent\": user_agent}\r\n url_jugadores = url_comuniazo + '/comunio/jugadores/'\r\n suffix, lastname = '', ''\r\n count = 0\r\n dates, points, prices = list(), list(), list()\r\n while True and len(dates) < 2:\r\n playername = self.check_exceptions(playername)\r\n req = session.get(url_jugadores + playername.replace(\" \", \"-\").replace(\".\", \"\").replace(\"'\", \"\") + suffix,\r\n headers=headers).content\r\n dates_re = re.search(\"(\\\"[0-9 ][0-9] de \\w+\\\",?,?)+\", req)\r\n try:\r\n dates = dates_re.group(0).replace('\"', '').split(\",\")\r\n dates = self.translate_dates(dates)\r\n except AttributeError:\r\n if count == 0:\r\n suffix = '-2'\r\n count += 1\r\n continue\r\n elif count == 1:\r\n lastname = playername.split(\" \")[1]\r\n playername = playername.split(\" \")[0]\r\n suffix = ''\r\n count += 1\r\n continue\r\n elif count == 2:\r\n playername = lastname\r\n count += 1\r\n continue\r\n\r\n data_re = re.search(\"data: \\[(([0-9nul]+,?)+)\\]\", req)\r\n if data_re is None:\r\n pass\r\n for price in data_re.group(1).split(','):\r\n try:\r\n prices.append(int(price))\r\n except ValueError:\r\n # No price\r\n pass\r\n\r\n try:\r\n html = BeautifulSoup(req, \"html.parser\")\r\n points_rows = html.find('table', {'class': 'points-list'}).find_all('tr')\r\n for row in points_rows:\r\n gameday = int(row.td.text)\r\n if row.div:\r\n points.append([gameday, int(row.div.text)])\r\n else:\r\n points.append([gameday, 0])\r\n except AttributeError:\r\n # Player without points\r\n pass\r\n\r\n if suffix == '-2' or len(dates) > 2:\r\n break\r\n else:\r\n suffix = '-2'\r\n\r\n return dates, prices, points",
"def test_get_player_stats_from_game():\n team = \"Titans\"\n year = \"2018\"\n week = \"1\"\n expected_team_stats = {}\n\n assert ff_team.get_player_stats_from_game(\n team, year, week) == expected_team_stats",
"async def get_league_players(season): \n if season == '2021-22':\n get_epl = 2021\n if season == '2020-21':\n get_epl = 2020\n if season == '2019-20':\n get_epl = 2019\n async with aiohttp.ClientSession() as session:\n understat = Understat(session)\n player = await understat.get_league_players(\"epl\", get_epl)\n # print(json.dumps(player))\n return player"
]
| [
"0.7217022",
"0.65925366",
"0.65881795",
"0.65571594",
"0.6556971",
"0.64844286",
"0.6317446",
"0.6163747",
"0.6094667",
"0.60902554",
"0.6062693",
"0.60613227",
"0.60085285",
"0.5928634",
"0.5907967",
"0.5881291",
"0.5858981",
"0.5852907",
"0.5847442",
"0.5840196",
"0.582685",
"0.5814748",
"0.5786702",
"0.57789534",
"0.57742226",
"0.57663894",
"0.5749221",
"0.5741541",
"0.57387143",
"0.56964916"
]
| 0.7116889 | 1 |
> (str) full name of the field with the name of the table included. i.e. the field |bar| in a table |foo| returns 'foo.bar' | def name(self):
if self.table:
return "{}.{}".format(self.table, self.field_name)
return self.field_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def namehack(field):\n if field.endswith((\"attribute\", \"views\")):\n return field + \"__name\"\n else:\n return field",
"def get_related_name(self, field=None):\n related_name = self.subrecord.__name__.lower()\n if field:\n related_name = \"{0}__{1}\".format(related_name, field)\n if self.is_patient_subrecord:\n return \"patient__{0}\".format(related_name)\n else:\n return related_name",
"def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")",
"def field_label(field_name, bushfire=None):\r\n field_name = FIELD_MAPPING.get(field_name) or field_name\r\n if bushfire:\r\n try:\r\n return bushfire._meta.get_field(field_name).verbose_name\r\n except:\r\n return field_name\r\n else:\r\n return field_name",
"def db_field_name(self):\r\n return self.db_field or self.column_name",
"def db_field_name(self):\r\n return self.db_field or self.column_name",
"def get_bare_field_name(field_name: str) -> str:\n\n return re.sub(r\"_[^_]+$\", \"\", field_name).replace(\"human_readable_\", \"\")",
"def _field_prefix(self):\n if self.layer_name == 'geninfo':\n return ''\n return self.layer_name + '.'",
"def get_source_fullname(col_name):\n src_dump = get_src_dump()\n info = src_dump.find_one({\"$where\":\"function() {if(this.upload) {for(var index in this.upload.jobs) {if(this.upload.jobs[index].step == \\\"%s\\\") return this;}}}\" % col_name})\n if info:\n name = info[\"_id\"]\n if name != col_name:\n # col_name was a sub-source name\n return \"%s.%s\" % (name,col_name)\n else:\n return name",
"def as_field(identifier: str) -> str:\n return identifier.lower()",
"def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()",
"def py_field_name(self, field):\n name = field.name\n name = as_identifier(name)\n if self.options(field).convert_case:\n name = from_camel_case(name)\n name = self._mangle_name(name)\n return name",
"def col_name(col):\n\n if isinstance(col, str):\n return col\n return col.__name__",
"def capnp_field_name(self, field):\n name = field.name\n return as_identifier(name)",
"def get_field_class_name(field):\n return field.capitalize() + \"Field\"",
"def _field_name(self) -> str:\n name = self._resolve_field_name()\n if name is None:\n # pylint: disable=consider-using-f-string\n raise FieldNameError(\n \"No field name found among: explicit name = {}, inferred name = {}\".format(\n self.__name_explicit, self.__name_contextual\n )\n )\n return name",
"def generate_field_name(container, field):\n if \"standard_name\" in container.fields[field]:\n field_name = container.fields[field][\"standard_name\"]\n elif \"long_name\" in container.fields[field]:\n field_name = container.fields[field][\"long_name\"]\n else:\n field_name = str(field)\n field_name = field_name.replace(\"_\", \" \")\n field_name = field_name[0].upper() + field_name[1:]\n return field_name",
"def _get_field_name(self, instance):\n fields = getattr(instance, \"_fields\")\n return fields[self.id]",
"def _table_name(cls, suffix, relative=False):\n mname = inspect.getmodule(cls).__name__ + '_' + suffix\n if relative:\n mname = mname.split('.')[-1]\n return mname",
"def get_qualified_name(self):\r\n return self.__schema + \".\" + self.__name",
"def autoname(self):\n ret = \"%(table)s_%(reftable)s_fkey\"%dict(\n table=self.table.name,\n reftable=self.reftable.name,\n )\n return ret",
"def tablename(entity) -> str:\n return entity.__tablename__",
"def table_name() -> str:\n pass",
"def query_string_for_field(self, field, operator=None, model=None):\n if model:\n if model._meta.proxy and \\\n model._meta.proxy_for_model is not field.model:\n raise ModelTreeError('proxied model must be the field model')\n\n else:\n model = field.model\n\n # When an explicit reverse field is used, simply use it directly\n if isinstance(field, RelatedObject):\n toks = [field.field.related_query_name()]\n else:\n path = self.query_string(model)\n\n if path:\n toks = [path, field.name]\n else:\n toks = [field.name]\n\n if operator is not None:\n toks.append(operator)\n\n return str('__'.join(toks))",
"def fieldName(self):\n return self._field.name",
"def get_ast_field_name(ast):\n replacements = {\n # We always rewrite the following field names into their proper underlying counterparts.\n TYPENAME_META_FIELD_NAME: '@class'\n }\n base_field_name = ast.name.value\n normalized_name = replacements.get(base_field_name, base_field_name)\n return normalized_name",
"def NAME(self) -> str:\n return self._field_name",
"def get_quote_table_field(field, stock_ticker):\n quote_table = si.get_quote_table(stock_ticker)\n return quote_table[field]",
"def get_field_name(self):\n if self.language is None:\n lang = \"i18n\"\n else:\n lang = self.get_language()\n\n return build_localized_fieldname(self.original_name, lang)",
"def label_for_field(instance, arg):\n if isinstance(instance, QuerySet):\n instance = instance.model\n try:\n return instance._meta.get_field(arg).verbose_name\n except FieldDoesNotExist:\n return ''"
]
| [
"0.69205326",
"0.6881168",
"0.6826993",
"0.6584583",
"0.65795636",
"0.65795636",
"0.6551517",
"0.6536231",
"0.65072536",
"0.63897425",
"0.63614273",
"0.6269764",
"0.6268629",
"0.6237498",
"0.62347585",
"0.62013537",
"0.6199085",
"0.6191742",
"0.61680526",
"0.6143412",
"0.61332357",
"0.6126377",
"0.6118862",
"0.6091152",
"0.6044386",
"0.6006682",
"0.59630096",
"0.59395885",
"0.59166557",
"0.590879"
]
| 0.77557653 | 0 |
Sends current value of drop down file list to get_file() and saves contents to events variable. | def send_file_name():
if value.get() == "----------------------":
messagebox.showinfo("Choose File", "Please choose a file to edit.", parent=app_frame)
return
elif len(entries) != 0:
messagebox.showinfo("Warning!", "You must first close the current file!", parent=app_frame)
return
events = get_file(value.get())
# Call display_lr_assignments() and send events file to be displayed in the application window
display_lr_assignments(events) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def choose_file(self):\n pass",
"def receiveFileFromDialog(self, paths):\n self.filesList.filesStartedLoading.emit(False)\n for p in paths:\n self.filesList.registerFile(None, QtCore.QString(p))\n self.filesList.filesFinishedLoading.emit(True)",
"def selectFiles(self):\n\n filenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # clears any case selections\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from source\")\n result = cur.fetchall()\n for row in result:\n filenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n self.fileIDs += \",\" + str(row[0])\n if len(self.fileIDs) > 0:\n self.fileIDs = self.fileIDs[1:]\n\n Dialog_selectfile = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(filenames)\n ui.setupUi(Dialog_selectfile, \"Select file(s) to view\", \"many\")\n ok = Dialog_selectfile.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedFiles = ui.getSelected() # list of dictionaries\n for row in selectedFiles:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.fileIDs = tmp_IDs[1:]",
"def OnOpen(self, e):\n self.mainparent.statusbar.SetStatusText(\"Loading Files ...\", 0)\n\n dirname = os.getcwd()\n dlg = wx.FileDialog(self, \"Select File\", dirname, \"\", \"*\", wx.FD_OPEN)\n\n if (dlg.ShowModal() != wx.ID_OK):\n dlg.Destroy()\n self.mainparent.reset_statusbar()\n return\n\n full_path = str(dlg.GetPath()) # get selected filename and convert to standard string\n\n self.mainparent.input_file = InputFile(full_path) # parse input file\n\n self.mainparent.update_namelist_menu() # update available namelist menu\n\n self.mainparent.reset_statusbar()\n self.mainparent.statusbar.SetStatusText(\"File: {}\".format(full_path), 2)\n\n self.mainparent.file_loaded = True",
"def setup(self):\n\n self.select_file.on_change(\"value\", self.callback_select_file)",
"def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename",
"def _select_file(self, change):\n selected_file = change.get(\"new\")\n self.file = str(Path(self.current_folder).joinpath(selected_file).resolve())",
"def select_files(self):\n pass",
"def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)",
"def _get_file(self, event):\n dlg = wx.FileDialog(None, \"Select a file\", \n wildcard=\"Password Files (*.*)|*.*\",\n defaultDir=os.getcwd(), \n style=wx.FD_SAVE)\n \n if dlg.ShowModal() == wx.ID_OK:\n newpath = dlg.GetPaths()[0]\n self.dbFile.Value = newpath\n \n dlg.Destroy()",
"def select_file_upload_method():\n\n if not Settings.prompt(\"upload files\"): \n return \"unset\"\n Settings.print(\"Select an upload source\")\n sources = Settings.get_source_options()\n question = {\n 'type': 'list',\n 'name': 'upload',\n 'message': 'Upload:',\n 'choices': [src.title() for src in sources]\n }\n upload = PyInquirer.prompt(question)[\"upload\"]\n\n\n # everything after this part should be in another function\n # this should just return the string of the upload source\n\n\n if str(upload) == \"Local\":\n return File.select_files()\n elif str(upload) == \"Google\":\n return Google_File.select_files()\n # elif str(upload) == \"Dropbox\":\n # return Dropbox.select_files()\n elif str(upload) == \"Remote\":\n return Remote.select_files()\n return File.select_files()",
"def capture_files(e):\n i = 0\n \n while i < e.dataTransfer.files.length:\n self.vue.files.push( e.dataTransfer.files[i] );\n i+=1\n self.getImagePreviews()\n self.submitFiles()",
"def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)",
"def OnOpen(self, e):\n\t\tsuccess = False\n\t\tdlg = wx.FileDialog(self, \"Choose a file\", self.dirname, \"\", \"*.*\", wx.FD_OPEN)\n\t\tif dlg.ShowModal() == wx.ID_OK:\n\t\t\tsuccess = True\n\t\t\tself.dirname, self.filename = os.path.split(dlg.GetPath())\n\t\tdlg.Destroy()\n\t\tif success:\n\t\t\tself.FileText.SetLabel(\"File: \"+self.filename)\n\t\t\tself.raw_file = data.load_data(os.path.join(self.dirname, self.filename))\n\t\t\tself.combine_data()\n\t\t\tself.plot_data()",
"def _get_file(self, event):\n dlg = wx.FileDialog(None, \"Select a file\", \n wildcard=\"Password Files (*.*)|*.*\",\n defaultDir=os.getcwd(), \n style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)\n \n if dlg.ShowModal() == wx.ID_OK:\n newpath = dlg.GetPaths()[0]\n self.dbFile.Value = newpath\n self._save_state()\n \n dlg.Destroy()",
"def _select_search_file(self, change):\n self.file = change.get(\"new\")",
"def add_file_option(self, name, callback):\n item = self.file_menu.Append(-1, name, name)\n self.Bind(wx.EVT_MENU, callback, item)",
"def on_browse(self, event):\r\n wildcard = \"All files (*.*)|*.*\"\r\n with wx.FileDialog(None, \"Choose a file\",\r\n wildcard=wildcard,\r\n style=wx.ID_OPEN) as dialog:\r\n if dialog.ShowModal() == wx.ID_OK:\r\n self.grin_location.SetValue(dialog.GetPath())",
"def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)",
"def on_add_file(self, event):\n wildcard = \"Media Files (*.*)|*.*\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentFolder, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.currentFolder = os.path.dirname(path[0])\n trackPath = '\"%s\"' % path.replace(\"\\\\\", \"/\")\n self.mplayer.Loadfile(trackPath)\n \n t_len = self.mplayer.GetTimeLength()\n self.playbackSlider.SetRange(0, t_len)\n self.playbackTimer.Start(100)",
"def tag_file_chooser(self):\n filename_list = tk.filedialog.askopenfilenames()\n self._tag_path_var.set(filename_list)",
"def select_files():\n\n if not Settings.is_prompt(): return [File.get_random_file()]\n category = Settings.select_category()\n if not category: return File.select_file_upload_method()\n # if not Settings.confirm(category): return File.select_files()\n Settings.print(\"Select Files or a Folder\")\n files = []\n while True:\n file = File.select_file(category)\n if not file: break\n ##\n if \"performer\" in str(category):\n cat = Settings.select_category([cat for cat in Settings.get_categories() if \"performer\" not in cat])\n performerName = file.get_title()\n file = File.select_file(cat, performer=performerName)\n if not file: break\n setattr(file, \"performer\", performerName)\n files.append(file)\n if \"galler\" in str(cat) or \"video\" in str(cat): break\n ##\n files.append(file)\n if \"galler\" in str(category) or \"video\" in str(category): break\n if str(files[0]) == \"unset\": return files\n if not Settings.confirm([file.get_title() for file in files]): return File.select_files()\n return files",
"def do_f(self, parms):\n\t\tprint self.files",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def invoke (self, context, event):\n context.window_manager.fileselect_add (self)\n return {'RUNNING_MODAL'}",
"def select_files(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(self.parent,\n \"File Export\",\n os.path.expanduser('~/'),\n \"Ensemble Files (*.ens, *.bin);;Binary Files (*.bin);;All Files (*)\",\n options=options)\n if files:\n # Store the list of results\n self.selected_files = files\n\n # Analyze the files\n self.analyze_files()",
"def get_file_list():\n wb = xw.Workbook.caller()\n path_input = xw.Range('Macro', 'FilePath').value\n l_file_path = glob.glob(path_input + '[!~]*.*')\n l_file_name = [l.split('/')[-1] for l in l_file_path]\n xw.Range('Macro', 'FileField').clear_contents()\n xw.Range('Macro', 'C_FilePath').options(transpose=True).value = l_file_path\n xw.Range('Macro', 'C_FileName').options(transpose=True).value = l_file_name\n xw.Sheet('Macro').activate()\n wb.macro('ShowMsg')(\"Choose DataType for all the listed files\")",
"def open_file(self):\n selected_file = open_file(self, 'Add File', _USER_DOCUMENTS, 'All Files (*)')\n if not selected_file:\n self.configuration_widgets.logger.warning('No file has been selected.')\n return\n self.configuration_widgets.logger.info('Processing File - {}'.format(selected_file))\n # Passing the selected item to the configure module to be processed\n _configure_object = ConfigureFiles(folder=os.path.dirname(selected_file))\n _configure_object.single_file(selected_file)\n # Adding the file\n self.tree_widget.add_items(_configure_object, self.configuration_widgets)",
"def _filename_multi(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilenames(**self._kwargs)"
]
| [
"0.6390715",
"0.624644",
"0.6072101",
"0.601609",
"0.59868085",
"0.5983324",
"0.59812456",
"0.596189",
"0.59615093",
"0.59475327",
"0.5873696",
"0.58421177",
"0.5780616",
"0.57759833",
"0.5753058",
"0.5729331",
"0.5674728",
"0.5668507",
"0.56300676",
"0.55968094",
"0.5553564",
"0.5552105",
"0.5547482",
"0.5520952",
"0.5520952",
"0.55076355",
"0.5505013",
"0.54966676",
"0.5487769",
"0.5482503"
]
| 0.66030127 | 0 |
Open the file to edit and read contents in to events list. | def get_file(file_to_edit):
events = []
file_path = lrs_path + file_to_edit
with open(file_path, "r") as the_file:
filereader = csv.reader(the_file)
for row in filereader:
events.append(row)
the_file.close()
return events | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_input(file_path: str, data_formatter: DataFormatter) -> Stream:\n with open(file_path, \"r\") as f:\n content = f.readlines()\n events = Stream()\n for line in content:\n events.add_item(Event(line, data_formatter))\n events.close()\n return events",
"def _load(self, filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n self.events = list(reader)",
"def read():\n # checks if existing alarms exist and places them in a list for faster data manipulation\n event_log(\"reading event database....\",\"\")\n data_file = open(read_json(\"Event_database\"), \"r+\")\n temp_list = []\n if os.stat(read_json(\"Event_database\")).st_size > 0:\n for z in data_file:#reads each line of file\n temp = \"\"\n for element in z:\n if element == \",\":#looks for comma as its used for seperating data in file\n temp_list.append(temp)\n temp = \"\"\n else:\n temp = temp + element\n Events_list.append(temp_list.copy())\n if math.floor(time.time()) - (convert_to_epoch(temp_list[1])) < 0:#determines if event is not expired\n events.enter(-(math.floor(time.time()) - (convert_to_epoch(temp_list[1]))), 1, expired_alarm)\n else: # already expired\n expired_alarm()\n temp_list.clear()\n data_file.close()",
"def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))",
"def _readin_evtx(file):\n\tcontent = []\n\tunparsed_entries = 0\n\twith evtx.Evtx(file) as log:\n\t\tc = 0\n\t\tsources = []\n\t\tfor record in log.records():\n\t\t\tc += 1\n\t\t\t_print_progress(c)\n\t\t\ttry:\n\t\t\t\tobj = untangle.parse(record.xml())#untangle can produce an OSError on Windows, since Windows uses a different format for timestamps\n\t\t\texcept OSError:\n\t\t\t\tc -= 1\n\t\t\t\tunparsed_entries += 1\n\t\t\t\tcontinue\n\t\t\tcurr_obj = obj.Event.System\n\t\t\tdate = curr_obj.TimeCreated['SystemTime']\n\t\t\tif '.' in date:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\telse:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S\")\n\t\t\tfull_line = record.xml()\n\t\t\tif hasattr(curr_obj,'Provider'):\n\t\t\t\tsource = curr_obj.Provider['Name']\n\t\t\telse:\n\t\t\t\tsource = ''\n\t\t\tif ( (not source in sources) and (not sources == '')):\n\t\t\t\tsources.append(source)\n\t\t\tline_nr = curr_obj.EventRecordID.cdata\n\t\t\tcontent.append(logfile_entry(int(line_nr), file, curr_obj.EventID.cdata, full_line, date, curr_obj.Computer.cdata, source))\n\t\t_delete_print()\n\tif unparsed_entries > 0:\n\t\tprint('Unfortunately, {} entries could not be parsed. Please see the documentation'.format(unparsed_entries))\n\t\tprint()\n\treturn logfile(file, len(content), 'evtx', content, sources)",
"def read(cls, event_file, regex=regex):\n with open(event_file, 'r') as f:\n filedata = f.read()\n event_matches = re.finditer(regex, filedata, re.VERBOSE + re.MULTILINE)\n list_ = [i.groupdict() for i in event_matches]\n #util.ipshell()\n for event in list_: # convert numbers to float and int types\n for key, item in event.iteritems():\n if util.isint(item):\n event[key] = int(item)\n elif util.isfloat(item):\n event[key] = float(item)\n else:\n event[key] = item.strip()\n #if event[key] == '':\n # event[key] = None\n #if key == 'depth' and regex == cls.regex:\n # event[key] *= 1\n #util.ipshell()\n log.info('Read event information of %d events from events event_file %s' % (len(list_), event_file))\n return cls(list_)",
"def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]",
"def read(self, filename):\n pass",
"def read(self, filename):\n pass",
"def create_event_list(event_file: TextIO) -> List[Event]:\n file = [event.strip('\\n').split() for event in event_file]\n events = []\n for line in file:\n if line[1] == 'Arrive':\n time = int(line[0])\n customer_name = line[2]\n items = []\n i = 3\n while i < len(line):\n items.append(Item(line[i], int(line[i+1])))\n i += 2\n events.append(CustomerArrival(time, Customer(customer_name, items)))\n else:\n time = int(line[0])\n line_index = int(line[2])\n events.append(CloseLine(time, line_index))\n return events",
"def open_file_path(self, file_path):\n\n\t\ttext = []\n\t\twith open(file_path, \"r\") as f:\n\t\t\tfor line in f:\n\t\t\t\ttext.append(line)\n\n\t\tself.raw = text",
"def run(self, edit):\n logger.debug('Schema Hacker: open file')\n for sel in self.view.sel():\n _open_files(self.view, sel)",
"def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump",
"def fromfile(cls, filename):\n root = xmlparser.fromfile(filename)\n return cls.wrap_list(root.iter('Event'))",
"def load_list(self):\n with open('/home/roman/Skola/ProjektyMimo/ApkaNaSkolu/output.dat', 'rb') as f:\n self.events = pickle.load(f)",
"def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))",
"def __parse(self):\n lines = self.file.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n tokens = line.split()\n if tokens[0] == \"#start\":\n trial_name = tokens[1]\n trial = Trial(trial_name)\n self.trials[trial_name] = trial\n elif tokens[0] == \"#end\":\n continue\n else:\n date_str = tokens[0] + \" \" + tokens[1]\n date = datetime.strptime(date_str, \"%m/%d/%y %H:%M:%S\")\n sound_file = line[18:-1].strip()\n event = Event(date, sound_file, 0)\n trial.addevent(event)",
"def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']",
"def open_file(self, fname):\n\n # Save that the file is opened.\n self.open_files[fname] = {}\n self.open_files[fname][\"name\"] = fname\n self.open_files[fname][\"contents\"] = []",
"def hook_file_opened(self):",
"def read_from_file(self, filename: str) -> None:",
"def load_arquivo_eventos(self, file):\n validfile, mensagem = self.valid_file(file,\n extensions=['json', 'bson', 'zip'])\n if not validfile:\n raise Exception(mensagem)\n if 'zip' in file.filename:\n file = ZipFile(file)\n content = file.read()\n content = content.decode('utf-8')\n eventos = json.loads(content)\n return eventos",
"def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n if state == 0:\n if line == '__IN_PROGRESS__':\n state = 1\n elif len(line) > 1:\n todo.append(line)\n elif state == 1:\n if line == '__DONE__':\n state = 2\n elif len(line) > 1:\n in_progress.append(line)\n elif state == 2:\n if len(line) > 1:\n done.append(line)\n line = todo_fp.readline()\n todo_fp.close()\n self.todo_scroll_cell.add_item_list(todo)\n self.in_progress_scroll_cell.add_item_list(in_progress)\n self.done_scroll_cell.add_item_list(done)",
"def test_handle_events_reading():\n # We can use any `raw` for this\n raw = _read_raw_fif(raw_fname)\n\n # Create an arbitrary events.tsv file, to test we can deal with 'n/a'\n # make sure we can deal w/ \"#\" characters\n events = {'onset': [11, 12, 'n/a'],\n 'duration': ['n/a', 'n/a', 'n/a'],\n 'trial_type': [\"rec start\", \"trial #1\", \"trial #2!\"]\n }\n tmp_dir = _TempDir()\n events_fname = op.join(tmp_dir, 'sub-01_task-test_events.json')\n _to_tsv(events, events_fname)\n\n raw = _handle_events_reading(events_fname, raw)\n events, event_id = mne.events_from_annotations(raw)",
"def _file_watcher(self, filename, interval):\n fp = open(filename)\n\n line = \"\"\n while self._watch_file:\n partial_line = fp.readline()\n if len(partial_line) != 0:\n line += partial_line\n if line.endswith(\"\\n\"):\n yield line\n line = \"\"\n else:\n time.sleep(interval)\n\n fp.close()",
"def read(self, filename):\n raise NotImplementedError",
"def test_load_events(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_testA.evt'))\n hen.read_events.main(command.split())\n new_filename = self.first_event_file\n ev = hen.io.load_events(new_filename)\n assert hasattr(ev, 'header')\n assert hasattr(ev, 'gti')",
"def _read(self, file_name):\n f = open(file_name)\n lines = f.readlines()\n begin = 0\n end = 0\n while end < len(lines):\n op = ''\n for l in lines[begin:]:\n end += 1\n op = l.split()[0]\n if op in operations:\n self.operations.append(op)\n break\n if op == '=push':\n nfa = Automaton(lines[begin:end - 1])\n self.aut_to_push.append(nfa)\n begin = end\n f.close()",
"def open(self):",
"def readfile(self):\n try:\n with open(filename, mode=\"r\") as fileobject:\n for line in fileobject:\n line = line.rstrip()\n self.__domainlist.append(line)\n\n fileobject.close()\n except:\n print(\"Error when reading file\")"
]
| [
"0.64051765",
"0.6239517",
"0.6128682",
"0.6004973",
"0.60032636",
"0.59520817",
"0.59228414",
"0.5887477",
"0.5887477",
"0.57675457",
"0.57645625",
"0.5748714",
"0.5746571",
"0.5715799",
"0.57102877",
"0.56773293",
"0.5657386",
"0.56479377",
"0.5640856",
"0.5597892",
"0.55806214",
"0.5570351",
"0.55619806",
"0.55517656",
"0.55432296",
"0.55370593",
"0.549769",
"0.5479807",
"0.5469199",
"0.54550505"
]
| 0.75434244 | 0 |
Tests makeInset and useInset | def testMakeInset(self):
# all ids
small = Labels(data=self.small_array)
ids=[1,2,5,7]
desired_inset = [slice(0,3), slice(1,5)]
np_test.assert_equal(small.findInset(ids=ids), desired_inset)
small.makeInset(ids=ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# some ids
small = Labels(data=self.small_array)
ids=[2,5]
desired_inset = [slice(0,2), slice(3,5)]
np_test.assert_equal(small.findInset(ids=ids), desired_inset)
small.makeInset(ids=ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# some ids with extend
small = Labels(data=self.small_array)
ids=[2,5]
desired_inset = [slice(0,3), slice(2,6)]
np_test.assert_equal(small.findInset(ids=ids, extend=1), desired_inset)
small.makeInset(ids=ids, extend=1)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(
small.data,
numpy.array([[1,2,5,0],
[0,2,5,0],
[0,0,7,0]]))
# wrong ids
small = Labels(data=self.small_array)
ids = [3,6]
desired_inset = [slice(0,0), slice(0,0)]
np_test.assert_equal(small.findInset(ids=ids), None)
small.makeInset(ids=ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# no ids
small = Labels(data=self.small_array)
ids = []
desired_inset = [slice(0,0), slice(0,0)]
np_test.assert_equal(small.findInset(ids=ids), None)
small.makeInset(ids=ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# no data
small = Labels(data=numpy.zeros((0,0), dtype=int))
ids = [1]
desired_inset = [slice(0,0), slice(0,0)]
np_test.assert_equal(small.findInset(ids=ids), None)
small.makeInset(ids=ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# with additional
small = Labels(data=self.small_array)
ids = [2]
small_2 = Labels(data=self.small_array)
additional_ids = [1]
desired_inset = [slice(0,3), slice(1,4)]
actual_inset = small.findInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(actual_inset, desired_inset)
small.makeInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# with additional, update=False
small = Labels(data=self.small_array)
ids = [2]
small_2 = Labels(data=self.small_array)
additional_ids = [1]
desired_inset = [slice(0,3), slice(1,4)]
actual_inset = small.findInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(actual_inset, desired_inset)
prev_inset = small.inset
prev_data = small.data
new_data = small.makeInset(ids=ids, additional=small_2,
additionalIds=additional_ids, update=False)
np_test.assert_equal(new_data, self.small_array[tuple(desired_inset)])
np_test.assert_equal(small.inset, prev_inset)
np_test.assert_equal(small.data, prev_data)
# no ids with additional
small = Labels(data=self.small_array)
ids = []
small_2 = Labels(data=self.small_array)
additional_ids = [1]
desired_inset = [slice(0,3), slice(1,3)]
actual_inset = small.findInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(actual_inset, desired_inset)
small.makeInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)])
# ids with wrong additional
small = Labels(data=self.small_array)
ids = [2]
small_2 = Labels(data=self.small_array)
additional_ids = [3]
desired_inset = [slice(0,2), slice(3,4)]
actual_inset = small.findInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(actual_inset, desired_inset)
small.makeInset(ids=ids, additional=small_2,
additionalIds=additional_ids)
np_test.assert_equal(small.inset, desired_inset)
np_test.assert_equal(small.data, self.small_array[tuple(desired_inset)]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_inset_locator(self, bounds, trans):\n def inset_locator(ax, renderer):\n bbox = mtransforms.Bbox.from_bounds(*bounds)\n bb = mtransforms.TransformedBbox(bbox, trans)\n tr = self.figure.transFigure.inverted()\n bb = mtransforms.TransformedBbox(bb, tr)\n return bb\n return inset_locator",
"def inset_view(plot, lon_data, lat_data, xlim, ylim, xdiv, ydiv,\n buffer=0.5, inset_size=0.15, plot_cmap = 'magma', bkgmap = True):\n inset = plot.fig.add_axes([0.02, 0.01, 0.02+inset_size,\n 0.01+inset_size],projection=ccrs.PlateCarree())\n\n inset.hist2d(lon_data, lat_data,\n bins=[np.arange(xlim[0]-buffer, xlim[1]+buffer+xdiv, xdiv),\n np.arange(ylim[0]-buffer, ylim[1]+buffer+ydiv, ydiv)],\n density=True, cmap=plot_cmap,\n cmin=0.00001)\n\n if bkgmap == True:\n if COUNTIES is not None:\n inset.add_feature(COUNTIES, facecolor='none', edgecolor='gray')\n inset.add_feature(cfeature.BORDERS)\n inset.add_feature(cfeature.STATES.with_scale('10m'))\n inset.set_extent([xlim[0]-buffer, xlim[1]+buffer,\n ylim[0]-buffer, ylim[1]+buffer])\n inset.plot([xlim[0],xlim[0],xlim[1],xlim[1],xlim[0]],\n [ylim[0],ylim[1],ylim[1],ylim[0],ylim[0]],'k')\n return inset",
"def testSetOffsetWithInt(self):\n self.node.offset = 2\n\n self.assertEqual(\n (2, 2, 2),\n self.node.offset\n )",
"def indicate_inset_zoom(\n self, alpha=None,\n lw=None, linewidth=None, zorder=3.5,\n color=None, edgecolor=None, **kwargs\n ):\n # Should be called from the inset axes\n parent = self._inset_parent\n alpha = alpha or 1.0\n linewidth = _notNone(\n lw, linewidth, rc['axes.linewidth'],\n names=('lw', 'linewidth'))\n edgecolor = _notNone(\n color, edgecolor, rc['axes.edgecolor'],\n names=('color', 'edgecolor'))\n if not parent:\n raise ValueError(f'{self} is not an inset axes.')\n xlim, ylim = self.get_xlim(), self.get_ylim()\n rect = (xlim[0], ylim[0], xlim[1] - xlim[0], ylim[1] - ylim[0])\n\n # Call indicate_inset\n rectpatch, connects = parent.indicate_inset(\n rect, self, linewidth=linewidth, edgecolor=edgecolor, alpha=alpha,\n zorder=zorder, **kwargs)\n\n # Update zoom or adopt properties from old one\n if self._inset_zoom_data:\n rectpatch_old, connects_old = self._inset_zoom_data\n rectpatch.update_from(rectpatch_old)\n rectpatch_old.set_visible(False)\n for line, line_old in zip(connects, connects_old):\n visible = line.get_visible()\n line.update_from(line_old)\n line.set_linewidth(line_old.get_linewidth())\n line.set_visible(visible)\n line_old.set_visible(False)\n else:\n for line in connects:\n line.set_linewidth(linewidth)\n line.set_color(edgecolor)\n line.set_alpha(alpha)\n self._inset_zoom_data = (rectpatch, connects)\n return (rectpatch, connects)",
"def test_exchange_point_mark(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.10\"),\n after_sel=(\"1.0\", \"1.10\"),\n command_name=\"exchange-point-mark\",\n )",
"def test_indent_relative(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"5.0\", \"5.0\"),\n after_sel=(\"5.8\", \"5.8\"),\n command_name=\"indent-relative\",\n )",
"def setupOverflows(region, style):\n\n # Extract the overall region.\n x0, y0, x1, y1 = region\n\n # By default, scale the overflow regions to the overall size.\n default_x_size = 0.035 * (x1 - x0)\n default_y_size = 0.035 * (y1 - y0)\n\n # Exctract style information.\n x_overflows = style.get(\"x_axis_overflows\", True)\n y_overflows = style.get(\"y_axis_overflows\", True)\n x_overflow_size = style.get(\"overflow_size\", default_x_size)\n y_overflow_size = style.get(\"overflow_size\", default_y_size)\n x_overflow_margin = style.get(\"overflow_margin\", default_x_size)\n y_overflow_margin = style.get(\"overflow_margin\", default_y_size)\n \n if x_overflows:\n # Adjust x coordinates for drawing overflows.\n xuf = (x0, x0 + x_overflow_size)\n x0 += x_overflow_size + x_overflow_margin\n xof = (x1 - x_overflow_size, x1)\n x1 -= x_overflow_size + x_overflow_margin\n else:\n xuf = None\n xof = None\n\n if y_overflows:\n # Adjust y coordinates for drawing overflows.\n yuf = (y0, y0 + y_overflow_size)\n y0 += y_overflow_size + y_overflow_margin\n yof = (y1 - y_overflow_size, y1)\n y1 -= y_overflow_size + y_overflow_margin\n else:\n yuf = None\n yof = None\n\n # Construct output.\n return (x0, y0, x1, y1), \\\n (x_overflows, y_overflows), \\\n (xuf, xof, yuf, yof)",
"def test_reset_limit_on_indent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = -1\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = 0\n indenter.indent()\n indenter.indentation = +1\n indenter.indent()\n indenter.indentation = +2\n indenter.indent()",
"def draw_at_position(inset, onto, at):\n x_start = at[0]\n y_start = at[1]\n x_end = x_start + inset.shape[1]\n y_end = y_start + inset.shape[0]\n onto[y_start:y_end, x_start:x_end] = inset",
"def test_position_adjustment(self):\n board = ss.Board()\n assert board.position_adjustment(8) == 10-8\n\n assert board.position_adjustment(56) == 37-56\n\n assert board.position_adjustment(63) == 0",
"def _patch_intercept_mark():\n\n original_intercept_mark = CodeViewText.intercept_mark\n\n def _patched_intercept_mark(self, *args):\n if args[:2] == (\"set\", \"insert\") and args[2].endswith(\".0\"):\n self.set_insertwidth(3)\n else:\n self.set_insertwidth(2)\n\n original_intercept_mark(self, *args)\n\n CodeViewText.intercept_mark = _patched_intercept_mark",
"def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01",
"def test_adjust_offsets(self):\n \n offsets = {\"ENSMUSG00000051951_1_147\" : 10, \n \"ENSG00000198901_2_52\" : 10 ,\n \"ENSG00000198901_3_239\" : 10, \n \"ENSG00000198901_4_85\" : 10 ,\n \"ENSG00000198901_5_47\" : 10 ,\n \"ENSG00000198901_6_119\" : 10 ,\n \"ENSG00000198901_7_58\" : 10 ,\n \"ENSG00000198901_8_588\" : 10 ,\n \"ENSG00000198901_10_92\" : 10 ,\n \"ENSG00000198901_11_59\" : 10 ,\n \"ENSG00000198901_12_196\" : 10 ,\n \"ENSG00000198901_13_36\" : 10 ,\n\n }\n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n \n results = adjust_offsets(bedtool, offsets)\n \n true_results = ((3206126, 3206130),\n (91513660, 91513664),\n (91517394, 91517398),\n (91517935, 91517939),\n (91522404, 91522408),\n (91523607, 91523611),\n (91524250, 91524254),\n (91525137, 91525141),\n (91527347, 91527351),\n (91527937, 91527941),\n (91528034, 91528038),\n (91537658, 91537662),\n )\n for result, true_result in zip(results, true_results):\n self.assertEqual(int(result[6]), true_result[0])\n self.assertEqual(int(result[7]), true_result[1])",
"def test__markMarkdown_indent1b(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markMarkdown(\"Hallo\\n Mama!\")==\"000000111111111\")",
"def test_test_new_inbox_ruleset(self):\n pass",
"def test_bz_batch(self):\n assert self.design.layout.layers[0].name == 'top'",
"def test_test_inbox_ruleset(self):\n pass",
"def block_offsets(self):\n ...",
"def testGetOffset(self):\n # Bypass setter\n self.node._offset = [12.8, 1.2, 1.4]\n\n self.assertEqual(\n (12.8, 1.2, 1.4),\n self.node.offset,\n )",
"def test_cover_set_position(self):\n with patch.dict(TYPES, {'WindowCovering': self.mock_type}):\n state = State('cover.set_position', 'open',\n {ATTR_SUPPORTED_FEATURES: 4})\n get_accessory(None, state, 2, {})",
"def test_inset(depth_one_tree):\n depth_one_tree.insert(2, 3)\n print(depth_one_tree.root.children[0].children)\n print(depth_one_tree.root.children[1].children)\n print(depth_one_tree.root.children[2].children)\n print(depth_one_tree.root.children[3].children)\n assert str(depth_one_tree.root.children[2].children) == str([2])",
"def test_multi_area(self):\n pass",
"def test__markMarkdown_indent1a(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markMarkdown(\"Hallo\\n\\tMama!\")==\"000000111111\")",
"def set_offset(self,offset,mid = None):\n\n BaseScreen.set_offset(self,offset,mid)\n for i in self.tilelist:\n for j in i:\n j.rect[0] += offset[0]\n j.rect[1] += offset[1]",
"def show_shape(model, inset_ax=[[-82, -72], [0, 5]], select=None,\n save=None, fig_mod=None):\n # If the function has already a figure to work with\n if fig_mod:\n ax = fig_mod.add_subplot('111')\n else:\n fig, ax = plt.subplots(figsize=(3.5, 3.5))\n\n lines = {i: [] for i in model.sections.iterkeys()}\n\n # Flat to plot the soma only once\n flag = True\n for section in model.sections.itervalues():\n lines, flag = add_line(ax, lines, section, flag)\n\n mark_circles = []\n for i, mark in enumerate(model.marks):\n s = model.mark_size\n c = model.mark_color\n mark_circles.append(plt.Circle(mark, s, color=c))\n\n # Add the marks if there is a marking\n for i in mark_circles:\n ax.add_artist(i)\n\n # Set fov\n ax.set_ylim(-100, 100)\n ax.set_xlim(-100, 100)\n ax.set_xticks([-100, 0, 100])\n ax.set_xlabel(r\"size($\\mu$m)\")\n ax.set_yticks([])\n\n # Set the right size when making films\n if not save:\n ax.set_aspect('equal')\n\n if inset_ax:\n clx, cly = inset_ax\n # Creating the inset\n ax2 = fig_mod.add_axes([.75, 0.23, 0.1, 0.2])\n adjust_spines(ax2, ['left', 'bottom'])\n ax2.set_ylim(cly[0], cly[1])\n ax2.set_ylabel(\"voltage(mv)\")\n ax2.set_xlim(clx[0], clx[1])\n ax2.set_xlabel(\"time(ms)\")\n ax2.set_yticks([int(cly[0]), int(cly[1])])\n ax2.set_xticks([0, clx[1]])\n\n if save:\n plt.tight_layout()\n plt.savefig(save, dpi=800)\n plt.close()\n else:\n if fig_mod:\n return fig_mod, ax, ax2, lines\n else:\n return ax, lines",
"def testGetPointsAll(self):\n\n # make data\n data = numpy.array(\n [[1, 1, 1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4, 4, 4]])\n\n # mode all, no inset\n labels = Labels(data=data)\n desired = (numpy.array([1, 1, 1, 1, 1, 1, 1]),\n numpy.array([0, 1, 2, 3, 4, 5, 6]))\n np_test.assert_equal(labels.getPoints(ids=2, mode='all', \n format_='numpy'), desired)\n desired = [[1, 0],\n [1, 1],\n [1, 2],\n [1, 3],\n [1, 4],\n [1, 5],\n [1, 6]]\n np_test.assert_equal(\n labels.getPoints(ids=[2], mode='all', format_='coordinates'), \n desired)\n\n # mode all, inset\n labels = Labels(data=data[1:4, 2:6])\n labels.setInset([slice(1, 4), slice(2, 6)])\n desired = (numpy.array([1, 1, 1, 1]),\n numpy.array([2, 3, 4, 5]))\n np_test.assert_equal(labels.getPoints(ids=[2], mode='all', \n format_='numpy'), desired)\n\n # mode all, multi ids, inset\n data_diag = numpy.array(\n [[2, 3, 4, 0, 0],\n [1, 2, 3, 4, 0],\n [0, 1, 2, 3, 4],\n [0, 0, 1, 2, 3]])\n labels = Labels(data=data_diag)\n desired = (numpy.array([0, 0, 1, 1, 2, 2, 3, 3]), \n numpy.array([0, 1, 1, 2, 2, 3, 3, 4]))\n np_test.assert_equal(\n labels.getPoints(ids=[2, 3], mode='all', format_='numpy'), desired)\n labels = Labels(data=data_diag[1:4, 2:5])\n labels.setInset([slice(1, 4), slice(2, 5)])\n desired = (numpy.array([1, 2, 2, 3, 3]), numpy.array([2, 2, 3, 3, 4]))\n np_test.assert_equal(\n labels.getPoints(ids=[2, 3], mode='all', format_='numpy'), desired)\n\n # mode all, inset + additional inset\n labels = Labels(data=data_diag[1:4, 1:5])\n labels.setInset([slice(1, 4), slice(1, 5)])\n desired = (numpy.array([1, 2]), numpy.array([3, 4]))\n np_test.assert_equal(\n labels.getPoints(ids=4, mode='all', format_='numpy'), desired)",
"def testOffsetSetAndGet(self):\n\n offset = (-1.3782, 278.32, 0.738378233782)\n offsetD = tuple([Decimal(str(i)) for i in offset])\n\n self.cc.offset = offset\n\n self.assertEqual(\n offsetD,\n self.cc.offset\n )",
"def testEditConfigCreateOffset(self):\n self.ports.editconfig_create_offset(file_name = 'editconfig_create_port_label.xml', port_ids = portsDict['port_ids'], offsets = portsDict['offset'])",
"def set_initial_offset(self, offset):\n self.initial_offset = max(\n min(\n (len(self) + 0.5) * self.item_heights - self.my_surface.get_height(),\n offset\n ),\n 0\n )",
"def test__markMarkdown_indent2b(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markMarkdown(\"Hallo\\n\\tMama!\\n Ich bin ein\\nCode\")==\"000000111111111111111111111110000\")"
]
| [
"0.6361135",
"0.5541073",
"0.5483409",
"0.5347032",
"0.5241986",
"0.51955086",
"0.51911783",
"0.5136891",
"0.50874513",
"0.5050792",
"0.5041951",
"0.50263894",
"0.4981297",
"0.49733314",
"0.4969125",
"0.49624497",
"0.49620852",
"0.49477723",
"0.49254608",
"0.49208865",
"0.49013603",
"0.48942262",
"0.484729",
"0.48412228",
"0.4832675",
"0.48294154",
"0.4811949",
"0.48011124",
"0.4793105",
"0.47671205"
]
| 0.74113864 | 0 |
Tests getPoints(mode='geodesic') and getPointsGeodesic(). Run several times because the methods tested depend on a random variable. | def testGetPointsGeodesic(self):
for i in range(10):
self.basicGetPointsGeodesic() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def basicGetPointsGeodesic(self):\n\n # more geodesic, distance=2 (complicated because random)\n data = numpy.array([[0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0],\n [0, 1, 1, 1, 2, 2, 2, 0]])\n labels = Labels(data=data)\n result = labels.getPoints(ids=[1], mode='geodesic', distance=2, \n connectivity=1)\n result = result.tolist()\n if len(result) == 5:\n desired = [[0, 1], [0, 3], [1, 2], [2, 1], [2, 3]]\n elif len(result) == 4:\n desired = [[0, 2], [1, 1], [1, 3], [2, 2]]\n elif len(result) == 3:\n if [1, 2] in result:\n if [0, 1] in result:\n desired = [[0, 1], [1, 2], [2, 3]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 2], [2, 1]]\n elif [0, 1] in result:\n if [0, 3] in result:\n desired = [[0, 1], [0, 3], [2, 2]]\n elif [2, 1] in result:\n desired = [[0, 1], [2, 1], [1, 3]]\n else:\n desired = [[0, 1], [1, 3], [2, 2]]\n elif [2, 3] in result:\n if [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 3]]\n elif [2, 1] in result:\n desired = [[0, 2], [2, 1], [2, 3]]\n else:\n desired = [[2, 3], [1, 1], [0, 2]] \n elif [0, 3] in result:\n desired = [[0, 3], [1, 1], [2, 2]]\n elif [2, 1] in result:\n desired = [[2, 1], [1, 3], [0, 2]]\n for des in desired:\n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)\n\n # mode geodesic, distance=3, inset\n labels = Labels(data=data[1:3, 2:8])\n labels.setInset([slice(1, 3), slice(2, 8)])\n result = labels.getPoints(ids=[2], mode='geodesic', distance=3, \n connectivity=1)\n result = result.tolist()\n if len(result) == 1:\n np_test.assert_equal(result[0][1], 5)\n elif len(result) == 2:\n desired = []\n if [1, 4] in result:\n desired = [[1, 4], [2, 6]]\n elif [2, 4] in result:\n desired = [[2, 4], [1, 6]]\n for des in desired: \n np_test.assert_equal(des in result, True)\n for res in result:\n np_test.assert_equal(res in desired, True)",
"def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]",
"def test_get_points():\n d1 = Driver(\"Andrew\", \"Audi A6\")\n initial_points = d1.get_points()\n assert type(d1.get_points()) == int\n assert d1.get_points() == 0\n d1.add_result(1, 18)\n assert d1.get_points() == 18\n d1.add_result(1, 25)\n assert d1.get_points() == 43\n assert initial_points != d1.get_points()",
"def run_all(params, mock=False):\n start = datetime.datetime.now()\n g_places = {}\n\n bounds = params[\"bounds\"]\n i = 0\n for lat, lng in get_circle_centers([bounds[\"lower\"][\"lat\"], bounds[\"lower\"][\"lng\"]], # southwest\n [bounds[\"upper\"][\"lat\"], bounds[\"upper\"][\"lng\"]], # northeast\n params[\"radius\"]):\n if not mock:\n logging.info(f\"Fetching places for {lat}, {lng}\")\n sleep(0.5)\n # all places found in the current circle (using the nearly API)\n circle_places = get_radar(params, {\n \"pos\": (lat, lng),\n \"res\": 0\n })\n logging.info(f\"{len(circle_places)} places found for {lat}, {lng}\")\n\n # add the places found in this circle to all places for the given bounding box\n g_places.update(circle_places)\n\n i += 1\n\n if mock:\n logging.info(f\"Mock run finished with {i} circles\")\n\n logging.info(\"Finished in: {}\".format(str(datetime.datetime.now() - start)))\n\n return g_places",
"def test_get_latitude():\n pass",
"def test_geo_ops_smoke(geo_table):\n t = geo_table\n\n # alias for fields\n point = t.geo_point\n linestring = t.geo_linestring\n polygon = t.geo_polygon\n multipolygon = t.geo_multipolygon\n\n # test ops\n point.srid()\n point.x()\n point.y()\n\n linestring.contains(point)\n linestring.end_point()\n linestring.length()\n linestring.max_distance(point)\n linestring.point_n(1)\n linestring.start_point()\n linestring.x_max()\n linestring.x_min()\n linestring.y_max()\n linestring.y_min()\n\n polygon.area()\n polygon.perimeter()\n\n multipolygon.n_points()\n multipolygon.n_rings()",
"def testPointSystem():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n registerPlayer(\"MewTwo\")\n standings = playerStandings()\n [id1, id2, id3, id4, id5] = [row[0] for row in standings]\n reportMatch(id1, id2)\n reportMatch(id3, id4, True)\n reportMatch(id5, id5, False, True)\n reportMatch(id1, id5)\n reportMatch(id3, id4)\n reportMatch(id2, id2, False, True)\n reportMatch(id1, id3)\n reportMatch(id5, id2)\n reportMatch(id4, id4, False, True)\n standings = playerStandings()\n if not (standings[0][0]==id2 and standings[0][2]==2 and\n standings[1][0]==id4 and standings[0][2]==2 and\n standings[2][0]==id3 and standings[0][2]==2 and\n standings[3][0]==id5 and standings[0][2]==2 and\n standings[4][0]==id1 and standings[0][2]==2):\n raise ValueError(\n \"Points are not tallied correctly.\"\n )\n\n print \"4. Points are tallied correctly.\"",
"def test_access(geometry):\n geometry.print_list_of_geos()\n geometry.print_list_of_geos_children()\n\n logger.info('TOP GEO:')\n top_geo = geometry.get_top_geo()\n top_geo.print_geo_children()\n\n logger.info('INTERMEDIATE GEO (QUAD):')\n geo = geometry.get_geo('QUAD:V1', 0)\n #geo = geometry.get_top_geo()\n geo.print_geo_children()\n\n t0_sec = time()\n X,Y,Z = geo.get_pixel_coords(do_tilt=True)\n #X,Y = geo.get_2d_pixel_coords()\n s = 'X: %s' % str(X)\n s+= '\\n Consumed time to get 3d pixel coordinates = %7.3f sec' % (time()-t0_sec)\n s+= '\\n Geometry object: %s:%d X.shape:%s' % (geo.oname, geo.oindex, str(X.shape))\n logger.info(s)\n\n logger.info('Test of print_pixel_coords() for quad:')\n geometry.print_pixel_coords('QUAD:V1', 1)\n logger.info('Test of print_pixel_coords() for CSPAD:')\n geometry.print_pixel_coords()\n\n s = 'Test of get_pixel_areas() for QUAD:'\n A = geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0:5,190:198]:\\n' + str(A[0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_pixel_areas() for CSPAD:'\n A = top_geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0,0:5,190:198]:\\n' + str(A[0,0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_size_geo_array()'\n s+= '\\n for QUAD: %d' % geo.get_size_geo_array()\n s+= '\\n for CSPAD: %d' % top_geo.get_size_geo_array()\n logger.info(s)\n\n s = 'Test of get_pixel_scale_size()'\n s+= '\\n for QUAD : %8.2f' % geo.get_pixel_scale_size()\n s+= '\\n for CSPAD : %8.2f' % top_geo.get_pixel_scale_size()\n s+= '\\n for geometry: %8.2f' % geometry.get_pixel_scale_size()\n logger.info(s)\n\n s = 'Test of get_dict_of_comments():'\n d = geometry.get_dict_of_comments()\n s+= '\\n d[0] = %s' % str(d[0])\n logger.info(s)",
"def test_points(self):\n\n good_points = {}\n\n for net in self.good.nets:\n for pid, p in net.points.items():\n good_points[pid] = p\n\n self.assertEqual(len(good_points), 24)\n\n for net in self.actual.nets:\n for pid, p in net.points.items():\n goodp = good_points.pop(pid)\n self.assertEqual(p.point_id, goodp.point_id)\n self.assertEqual(p.x, goodp.x)\n self.assertEqual(p.y, goodp.y)\n self.assertEqual(set(p.connected_points),\n set(goodp.connected_points))\n\n self.assertEqual(good_points, {})",
"def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)",
"def test_l2_metric_geodesic(\n self, l2_metric_s2, times, n_sampling_points, landmarks_a, landmarks_b\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_ab = landmarks_ab(times)\n\n result = landmarks_ab\n expected = []\n for k in range(n_sampling_points):\n geod = l2_metric_s2.ambient_metric.geodesic(\n initial_point=landmarks_a[k, :], end_point=landmarks_b[k, :]\n )\n expected.append(geod(times))\n expected = gs.stack(expected, axis=1)\n\n self.assertAllClose(result, expected)",
"def runTest(self):\n self.setUp()\n self.test_FiberDistance1()",
"def test_get_latitude_fail():\n pass",
"def test_calculate_points():\n d1 = Driver(\"Andrew\", \"Audi A6\")\n assert type(d1.calculate_points()) == int\n assert d1.calculate_points() == 0\n d1.add_result(1, 25)\n assert d1.calculate_points() == 25\n d1.add_result(2, int(random.random()) + 1)\n assert d1.calculate_points() > 25",
"def test_get_longitude():\n pass",
"def test_get_nearest(self):\n switzerland = Country.objects.get(name=u\"Switzerland\")\n uk = Country.objects.get(name=u\"United Kingdom\")\n \n user1, person1 = self._create_person(\"user1\", \"[email protected]\",\n country=switzerland.name,\n latitude=46.519582,\n longitude=6.632121,\n location_description=u\"Geneva\")\n # Geneva -> Saint-Genis: 10.9km\n user2, person2 = self._create_person(\"user2\", \"[email protected]\",\n country=switzerland.name,\n latitude=46.205973,\n longitude=6.5995789,\n location_description=u\"Saint-Genis\")\n \n # Geneva -> Islington: 986km\n user3, person3 = self._create_person(\"user3\", \"[email protected]\",\n country=uk.name,\n latitude=51.532601866,\n longitude=-0.108382701874,\n location_description=u\"Islington\")\n \n # Geneva -> Lausanne: 63.2km\n user4, person4 = self._create_person(\"user4\", \"[email protected]\",\n country=switzerland.name,\n latitude=46.243572,\n longitude=6.02107,\n location_description=u\"Lausanne\")\n \n \n near = person1.get_nearest(within_range=9999)\n \n self.assertEqual(near, [person2, person4, person3])\n \n # the within range feature doesn't work in mysql\n if settings.DATABASE_ENGINE == 'mysql':\n return\n \n # person2: 21.7 miles\n # person4: 34.7 miles\n # person3: 471.9 miles\n near = person1.get_nearest(within_range=100)\n \n self.assertEqual(near, [person2, person4])\n \n near = person1.get_nearest(num=1, within_range=100)\n \n self.assertEqual(near, [person2])",
"def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")",
"def test_proximity_endpoint(self):\n endpoint = settings.PROXIMITY_ENDPOINT\n access_token = config.ACCESS_TOKEN\n self.assertValidGetOicJsonEndpoint(endpoint, access_token)",
"def test_geo_data_created(self):\n # Currently, there are no GeometryStore or PointGeometry objects in the database\n self.assertEqual(GeometryStore.objects.count(), 0)\n self.assertEqual(PointGeometry.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n\n # GeometryStore objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's GeometryStore\n self.assertEqual(GeometryStore.objects.count(), 3)\n # PointGeometry objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's PointGeometry\n self.assertEqual(PointGeometry.objects.count(), 3)\n # The powerplant_ouessant point is correct\n powerplant_ouessant_points = powerplant_ouessant.geo.points.all()\n self.assertEqual(powerplant_ouessant_points.count(), 1)\n self.assertEqual(powerplant_ouessant_points.first().geom.x, -5.11121)\n self.assertEqual(powerplant_ouessant_points.first().geom.y, 48.43754)\n # The powerplant_ilarionas point is correct\n powerplant_ilarionas_points = powerplant_ilarionas.geo.points.all()\n self.assertEqual(powerplant_ilarionas_points.count(), 1)\n self.assertEqual(powerplant_ilarionas_points.first().geom.x, 21.8039)\n self.assertEqual(powerplant_ilarionas_points.first().geom.y, 40.0966)\n # The project_liaoning gets its geodata from its latitude and longitude\n # cells\n project_liaoning_points = project_liaoning.geo.points.all()\n self.assertEqual(project_liaoning_points.count(), 1)\n self.assertEqual(project_liaoning_points.first().geom.x, 121.38065)\n self.assertEqual(project_liaoning_points.first().geom.y, 41.16469)\n # For the project_ouessant1 and project_ouessant2, the latitude and\n # longitude cells are blank, so they get their geodata from their\n # parent PowerPlant (powerplant_ouessant).\n self.assertEqual(project_ouessant1.geo, project_ouessant1.power_plant.geo)\n self.assertEqual(project_ouessant2.geo, project_ouessant2.power_plant.geo)\n # The powerplant_tonstad has no geo data\n self.assertIsNone(powerplant_tonstad.geo)",
"def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")",
"def test_get_points_to_estimate(self):\r\n # Ref in range.\r\n obs = self.estimator1._get_points_to_estimate(4, 1, 5, 4)\r\n self.assertEqual(obs, [1, 2, 3, 4, 5])\r\n\r\n # Ref not in range.\r\n obs = self.estimator1._get_points_to_estimate(4, 5, 10, 2)\r\n self.assertEqual(obs, [4, 5, 7, 9])\r\n\r\n # stop not supplied.\r\n obs = self.estimator1._get_points_to_estimate(5, 5, num_steps=2)\r\n self.assertEqual(obs, [5, 17, 29])",
"def test_KGE_methods(model_name):\n testing_function(model_name)",
"def test_same_point_not_submitted(self):\n self.c.force_login(self.u)\n data = {'lat': 34.0, 'lng': 45.3, 'zoom': 13}\n response = self.c.get(reverse(submit_point), data)\n response = self.c.get(reverse(submit_point), data)\n response = self.c.get(reverse(submit_point), data)\n response = self.c.get(reverse(submit_point), data)\n response = self.c.get(reverse(submit_point), data)\n response = self.c.get(reverse(submit_point), data)\n response = self.c.get(reverse(submit_point), data)\n self.assertEqual(Point.objects.count(), 1)",
"def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (1030.0, 525.0, \"straight\"),\n (1030.0, 475.0, \"straight\"),\n (970.0, 475.0, \"straight\"),\n (970.0, 525.0, \"straight\"),\n (1030.0, 525.0, \"straight\"),\n ]",
"def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def test_getters():\n\n A = np.random.randint(2, size=(10, 10))\n S = np.random.randint(2, size=10)\n stocks = np.random.rand(10)\n ones = np.ones(10)\n # Dummy values\n m = ExploitCore(A, S, stocks, ones, ones, ones, 0.5, 2.0)\n assert (m.get_adjacency() == A).all()\n assert (m.get_strategies() == S).all()\n assert (m.get_stocks() == stocks).all()\n assert m.get_time() == 0",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]",
"def test_run_spatial_function(session):\n factories.ConnectionNodeFactory()\n q = session.query(func.ST_AsGeoJSON(models.ConnectionNode.the_geom))\n q.first()",
"def test_get_speed_limit():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n assert get_speed_limit(center, radius, speed_limit) != center\n assert get_speed_limit(center, radius, speed_limit) != radius\n assert get_speed_limit(center, radius, speed_limit) == speed_limit"
]
| [
"0.6299371",
"0.58874136",
"0.58374065",
"0.5801667",
"0.5795033",
"0.5749049",
"0.57166374",
"0.56472635",
"0.562469",
"0.5586442",
"0.55645484",
"0.5560855",
"0.5552215",
"0.550962",
"0.5506189",
"0.5504319",
"0.54882073",
"0.5474092",
"0.5473591",
"0.5468628",
"0.54185873",
"0.5392978",
"0.5365177",
"0.534209",
"0.5337191",
"0.5334295",
"0.53249973",
"0.53214514",
"0.53177106",
"0.53108865"
]
| 0.8351135 | 0 |
Single test for getPoints(mode='geodesic') and getPointsGeodesic(). | def basicGetPointsGeodesic(self):
# more geodesic, distance=2 (complicated because random)
data = numpy.array([[0, 1, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 1, 2, 2, 2, 0]])
labels = Labels(data=data)
result = labels.getPoints(ids=[1], mode='geodesic', distance=2,
connectivity=1)
result = result.tolist()
if len(result) == 5:
desired = [[0, 1], [0, 3], [1, 2], [2, 1], [2, 3]]
elif len(result) == 4:
desired = [[0, 2], [1, 1], [1, 3], [2, 2]]
elif len(result) == 3:
if [1, 2] in result:
if [0, 1] in result:
desired = [[0, 1], [1, 2], [2, 3]]
elif [0, 3] in result:
desired = [[0, 3], [1, 2], [2, 1]]
elif [0, 1] in result:
if [0, 3] in result:
desired = [[0, 1], [0, 3], [2, 2]]
elif [2, 1] in result:
desired = [[0, 1], [2, 1], [1, 3]]
else:
desired = [[0, 1], [1, 3], [2, 2]]
elif [2, 3] in result:
if [0, 3] in result:
desired = [[0, 3], [1, 1], [2, 3]]
elif [2, 1] in result:
desired = [[0, 2], [2, 1], [2, 3]]
else:
desired = [[2, 3], [1, 1], [0, 2]]
elif [0, 3] in result:
desired = [[0, 3], [1, 1], [2, 2]]
elif [2, 1] in result:
desired = [[2, 1], [1, 3], [0, 2]]
for des in desired:
np_test.assert_equal(des in result, True)
for res in result:
np_test.assert_equal(res in desired, True)
# mode geodesic, distance=3, inset
labels = Labels(data=data[1:3, 2:8])
labels.setInset([slice(1, 3), slice(2, 8)])
result = labels.getPoints(ids=[2], mode='geodesic', distance=3,
connectivity=1)
result = result.tolist()
if len(result) == 1:
np_test.assert_equal(result[0][1], 5)
elif len(result) == 2:
desired = []
if [1, 4] in result:
desired = [[1, 4], [2, 6]]
elif [2, 4] in result:
desired = [[2, 4], [1, 6]]
for des in desired:
np_test.assert_equal(des in result, True)
for res in result:
np_test.assert_equal(res in desired, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testGetPointsGeodesic(self):\n\n for i in range(10):\n self.basicGetPointsGeodesic()",
"def geospatial(self):\n return bool(\n self.dataset.GetProjection() or\n (self.dataset.GetGCPProjection() and self.dataset.GetGCPs()) or\n self.dataset.GetGeoTransform(can_return_null=True) or\n hasattr(self, '_netcdf'))",
"def test_operators_functions_unavailable_for_geography(self):\n z = Zipcode.objects.get(code=\"77002\")\n point_field = \"%s.%s::geometry\" % (\n connection.ops.quote_name(City._meta.db_table),\n connection.ops.quote_name(\"point\"),\n )\n # ST_Within.\n qs = City.objects.filter(point__within=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"ST_Within({point_field}\", ctx.captured_queries[0][\"sql\"])\n # @ operator.\n qs = City.objects.filter(point__contained=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} @\", ctx.captured_queries[0][\"sql\"])\n # ~= operator.\n htown = City.objects.get(name=\"Houston\")\n qs = City.objects.filter(point__exact=htown.point)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} ~=\", ctx.captured_queries[0][\"sql\"])",
"def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]",
"def test_access(geometry):\n geometry.print_list_of_geos()\n geometry.print_list_of_geos_children()\n\n logger.info('TOP GEO:')\n top_geo = geometry.get_top_geo()\n top_geo.print_geo_children()\n\n logger.info('INTERMEDIATE GEO (QUAD):')\n geo = geometry.get_geo('QUAD:V1', 0)\n #geo = geometry.get_top_geo()\n geo.print_geo_children()\n\n t0_sec = time()\n X,Y,Z = geo.get_pixel_coords(do_tilt=True)\n #X,Y = geo.get_2d_pixel_coords()\n s = 'X: %s' % str(X)\n s+= '\\n Consumed time to get 3d pixel coordinates = %7.3f sec' % (time()-t0_sec)\n s+= '\\n Geometry object: %s:%d X.shape:%s' % (geo.oname, geo.oindex, str(X.shape))\n logger.info(s)\n\n logger.info('Test of print_pixel_coords() for quad:')\n geometry.print_pixel_coords('QUAD:V1', 1)\n logger.info('Test of print_pixel_coords() for CSPAD:')\n geometry.print_pixel_coords()\n\n s = 'Test of get_pixel_areas() for QUAD:'\n A = geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0:5,190:198]:\\n' + str(A[0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_pixel_areas() for CSPAD:'\n A = top_geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0,0:5,190:198]:\\n' + str(A[0,0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_size_geo_array()'\n s+= '\\n for QUAD: %d' % geo.get_size_geo_array()\n s+= '\\n for CSPAD: %d' % top_geo.get_size_geo_array()\n logger.info(s)\n\n s = 'Test of get_pixel_scale_size()'\n s+= '\\n for QUAD : %8.2f' % geo.get_pixel_scale_size()\n s+= '\\n for CSPAD : %8.2f' % top_geo.get_pixel_scale_size()\n s+= '\\n for geometry: %8.2f' % geometry.get_pixel_scale_size()\n logger.info(s)\n\n s = 'Test of get_dict_of_comments():'\n d = geometry.get_dict_of_comments()\n s+= '\\n d[0] = %s' % str(d[0])\n logger.info(s)",
"def test_coords():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n\n return x, y",
"def isGeospatial(path):\n try:\n ds = gdal.Open(str(path), gdalconst.GA_ReadOnly)\n except Exception:\n return False\n if ds:\n if ds.GetGCPs() and ds.GetGCPProjection():\n return True\n if ds.GetProjection():\n return True\n if ds.GetGeoTransform(can_return_null=True):\n return True\n if ds.GetDriver().ShortName in {'NITF', 'netCDF'}:\n return True\n return False",
"def is_gps(self):\n row_type = self.get_type()\n is_gps = row_type in ('hidden geopoint', 'geopoint')\n return is_gps",
"def test_find_points_in_conus_no_shortcuts(self):\n\n conus_latitudes_deg, conus_longitudes_deg = (\n conus_boundary.read_from_netcdf()\n )\n\n these_flags = conus_boundary.find_points_in_conus(\n conus_latitudes_deg=conus_latitudes_deg,\n conus_longitudes_deg=conus_longitudes_deg,\n query_latitudes_deg=QUERY_LATITUDES_DEG,\n query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=False)\n\n self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))",
"def test_geo_ops_smoke(geo_table):\n t = geo_table\n\n # alias for fields\n point = t.geo_point\n linestring = t.geo_linestring\n polygon = t.geo_polygon\n multipolygon = t.geo_multipolygon\n\n # test ops\n point.srid()\n point.x()\n point.y()\n\n linestring.contains(point)\n linestring.end_point()\n linestring.length()\n linestring.max_distance(point)\n linestring.point_n(1)\n linestring.start_point()\n linestring.x_max()\n linestring.x_min()\n linestring.y_max()\n linestring.y_min()\n\n polygon.area()\n polygon.perimeter()\n\n multipolygon.n_points()\n multipolygon.n_rings()",
"def test_coord_preceding_fs(self):",
"def geog(self) -> typing.Union[None, typing.Tuple[float, float]]:\n geog = self.data[4]\n geog = re.findall(r'(\\d{2})(\\d{2})(\\d{2}\\.\\d+)([NS]) (\\d{3})(\\d{2})(\\d{2}\\.\\d+)([EW])', geog)\n\n if geog:\n lat_deg, lat_min, lat_sec, lat_dir, long_deg, long_min, long_sec, long_dir = geog[0]\n\n lat = Point.parse_degrees(lat_deg, lat_min, lat_sec, direction=lat_dir)\n long = Point.parse_degrees(long_deg, long_min, long_sec, direction=long_dir)\n return lat, long\n return None",
"def test_run_spatial_function(session):\n factories.ConnectionNodeFactory()\n q = session.query(func.ST_AsGeoJSON(models.ConnectionNode.the_geom))\n q.first()",
"def test_point_positive_on_one_line(self):\n a = Point(1, 0)\n b = Point(34, 0)\n c = Point(42, 0)\n\n self.assertTrue(Point.on_one_line(a, b, c),\n \"Test of Point.on_one_line(a, b, c) failed, returned value != True.\")\n d = Point(1, 2)\n e = Point(34, 43)\n f = Point(42, 54)\n\n self.assertFalse(Point.on_one_line(d, e, f),\n \"Test of Point.on_one_line(d, e, f) failed, returned value != False.\")\n\n self.assertTrue(Point.on_one_line(a), \"Test of Point.on_one_line(a) failed, returned value != True.\")",
"def point(x, y):\n return test(Point(x,y))",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def test_points_exists(self):\n self.assertEqual(Destination.objects.filter(name='testWithin')[0].point,\n self.test_point_inside)\n self.assertEqual(Destination.objects.filter(name='testWithout')[0].point,\n self.test_point_outside)",
"def test_geodesic(self, space, norm):\n n_steps = 1000\n space.equip_with_metric(self.Metric)\n base_point = space.random_point()\n tangent_vec = norm * space.metric.random_unit_tangent_vec(base_point=base_point)\n geod = space.metric.geodesic(\n initial_point=base_point,\n initial_tangent_vec=tangent_vec,\n )\n t = gs.linspace(0.0, 1.0, n_steps)\n geod_at_t = geod(t)\n velocity = n_steps * (geod_at_t[1:, :] - geod_at_t[:-1, :])\n velocity_norm = space.metric.norm(velocity, geod_at_t[:-1, :])\n result = 1 / velocity_norm.min() * (velocity_norm.max() - velocity_norm.min())\n expected = 0.0\n self.assertAllClose(expected, result, rtol=1.0)",
"def check_points(nodeL, nodeR, points, city):\n A = points\n B = city\n C = nodeL\n D = nodeR\n\n d1 = (B[0] - A[0]) * (C[1] - A[1]) - (B[1] - A[1]) * (C[0] - A[0])\n d2 = (B[0] - A[0]) * (D[1] - A[1]) - (B[1] - A[1]) * (D[0] - A[0])\n\n if (d1 < 0) & (d2 < 0):\n return True\n if (d1 > 0) & (d2 > 0):\n return True\n else:\n return False",
"def geodesic(self, initial_point, end_point=None, initial_tangent_vec=None):\n args = {\n \"initial_point\": initial_point,\n \"end_point\": end_point,\n \"initial_tangent_vec\": initial_tangent_vec,\n }\n geodesics = self._iterate_over_factors(\"geodesic\", args)\n\n def geod_fun(t):\n t = gs.to_ndarray(t, to_ndim=1)\n values = [geodesic(t) for geodesic in geodesics]\n return self._space.embed_to_product(values)\n\n return geod_fun",
"def global_coords(self) -> GlobalCoordsABC:",
"def test_points(self):\n\n good_points = {}\n\n for net in self.good.nets:\n for pid, p in net.points.items():\n good_points[pid] = p\n\n self.assertEqual(len(good_points), 24)\n\n for net in self.actual.nets:\n for pid, p in net.points.items():\n goodp = good_points.pop(pid)\n self.assertEqual(p.point_id, goodp.point_id)\n self.assertEqual(p.x, goodp.x)\n self.assertEqual(p.y, goodp.y)\n self.assertEqual(set(p.connected_points),\n set(goodp.connected_points))\n\n self.assertEqual(good_points, {})",
"def test_l2_metric_geodesic(\n self, l2_metric_s2, times, n_sampling_points, landmarks_a, landmarks_b\n ):\n landmarks_ab = l2_metric_s2.geodesic(landmarks_a, landmarks_b)\n landmarks_ab = landmarks_ab(times)\n\n result = landmarks_ab\n expected = []\n for k in range(n_sampling_points):\n geod = l2_metric_s2.ambient_metric.geodesic(\n initial_point=landmarks_a[k, :], end_point=landmarks_b[k, :]\n )\n expected.append(geod(times))\n expected = gs.stack(expected, axis=1)\n\n self.assertAllClose(result, expected)",
"def ground_contact_geoms(self):\n raise NotImplementedError",
"def gecos(self):\n\t\treturn self.__gecos",
"def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely",
"def spatial(self):",
"def test_get_coord_by_attr_valid():\n pass",
"def get_geospatial(self):\n self.unimpl_base_class()",
"def geodesics(self, source_indexes, method='exact'):\n if not isinstance(source_indexes, collections.Iterable):\n source_indexes = [source_indexes]\n if not all(0 <= i < self.n_points for i in source_indexes):\n raise TriMeshGeodesicsError('Invalid indexes ' +\n '(all must be in range '\n '0 <= i < n_points)')\n return self._kirsanov.geodesics(source_indexes, method)"
]
| [
"0.7228238",
"0.62414855",
"0.59653425",
"0.5937499",
"0.5887669",
"0.56210893",
"0.5615722",
"0.5609805",
"0.56017345",
"0.55899805",
"0.55500317",
"0.55399203",
"0.5539257",
"0.55192065",
"0.550209",
"0.5491937",
"0.5491439",
"0.54660296",
"0.545849",
"0.5451511",
"0.54421204",
"0.54277587",
"0.54208195",
"0.5400046",
"0.53841674",
"0.5378496",
"0.53757733",
"0.5372708",
"0.53628623",
"0.53432935"
]
| 0.6788587 | 1 |
Return the attributes `h` (Total heat loss), `hlp` (Heat Loss Parameter per square meter), `h_fabric`, `h_bridging`, `h_vent`, `h_vent_annual` on the given dwelling object | def heat_loss(dwelling):
# TODO: what is "h"?
if dwelling.get('hlp') is not None:
return dict(h=dwelling.hlp * dwelling.GFA, hlp=dwelling.hlp)
UA = sum(e.Uvalue * e.area for e in dwelling.heat_loss_elements)
A_bridging = sum(e.area for e in dwelling.heat_loss_elements if e.is_external)
if dwelling.get("Uthermalbridges") is not None:
h_bridging = dwelling.Uthermalbridges * A_bridging
else:
h_bridging = sum(x['length'] * x['y'] for x in dwelling.y_values)
h_vent = 0.33 * dwelling.infiltration_ach * dwelling.volume
h = UA + h_bridging + h_vent
return dict(
h=h,
hlp=h / dwelling.GFA,
h_fabric=UA,
h_bridging=h_bridging,
h_vent=h_vent,
h_vent_annual=monthly_to_annual(h_vent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_demand_calc(dwelling):\n\n # TODO: modify functions to take only the arguments they need instead of the whole dwelling data.\n dwelling.update(ventilation(dwelling))\n\n dwelling.update(heat_loss(dwelling))\n\n dwelling.update(hot_water_use(dwelling))\n\n dwelling.update(lighting_consumption(dwelling))\n\n dwelling.update(internal_heat_gain(dwelling))\n\n dwelling.update(solar(dwelling))\n\n # Need to copy the Q_required from the heat calc results to it's own attribute for compatibility\n dwelling.heat_calc_results = heating_requirement(dwelling)\n dwelling.Q_required = dwelling.heat_calc_results['heat_required']\n\n dwelling.Q_cooling_required = cooling_requirement(dwelling)\n\n dwelling.output_from_water_heater = water_heater_output(dwelling)\n\n return dwelling",
"def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. / 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)",
"def configure_fghr(dwelling):\n # TODO: Should check that fghr is allowed for this system\n\n if dwelling.get('fghrs') is not None:\n # TODO: Need to add electrical power G1.4\n # FIXME: Entire fghrs calc is unfinished really\n dwelling.fghrs.update(\n dict(get_fghr_system(dwelling.fghrs['pcdf_id'])))\n\n if dwelling.fghrs[\"heat_store\"] == \"3\":\n assert dwelling.water_sys.system_type == HeatingTypes.combi\n assert not dwelling.get('hw_cylinder_volume')\n assert not dwelling.has_hw_cylinder\n\n dwelling.has_hw_cylinder = True\n dwelling.has_cylinderstat = True\n dwelling.has_hw_time_control = True\n hw_cylinder_volume = dwelling.fghrs['heat_store_total_volume']\n dwelling.measured_cylinder_loss = dwelling.fghrs['heat_store_loss_rate']\n dwelling.water_sys.table2b_row = 5\n\n # !!! This ideally wouldn't be here! Basically combi loss\n # !!! has already been calculated, but now we are adding a\n # !!! thermal store, so need to recalculate it\n if dwelling.water_sys.get('pcdf_data'):\n configure_combi_loss(dwelling,\n dwelling.water_sys,\n dwelling.water_sys.pcdf_data)\n else:\n dwelling.water_sys.combi_loss = combi_loss_table_3a(\n hw_cylinder_volume, dwelling.water_sys)\n\n if dwelling.fghrs[\"has_pv_module\"]:\n assert \"PV_kWp\" in dwelling.fghrs\n appendix_m.configure_pv_system(dwelling.fghrs)\n dwelling.fghrs['monthly_solar_hw_factors'] = TABLE_H3[dwelling.fghrs['pitch']]\n\n\n dwelling.hw_cylinder_volume = hw_cylinder_volume\n\n else:\n assert not \"PV_kWp\" in dwelling.fghrs\n\n if (dwelling.water_sys.system_type in [HeatingTypes.combi,\n HeatingTypes.storage_combi] and\n dwelling.water_sys.get('has_no_keep_hot') and not dwelling.has_hw_cylinder):\n\n dwelling.fghrs['equations'] = dwelling.fghrs['equations_combi_without_keephot_without_ext_store']\n else:\n dwelling.fghrs['equations'] = dwelling.fghrs['equations_other']",
"def calc_lhv(self):\n hf = {}\n hf['hydrogen'] = 0\n hf['methane'] = -74.85\n hf['ethane'] = -84.68\n hf['propane'] = -103.8\n hf['butane'] = -124.51\n hf['O2'] = 0\n hf['CO2'] = -393.5\n # water (gaseous)\n hf['H2O'] = -241.8\n\n lhv = 0\n\n for f, x in self.fuel.val.items():\n molar_masses[f] = CP.PropsSI('M', f)\n fl = set(list(hf.keys())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n if len(fl) == 0:\n continue\n\n if list(fl)[0] in self.fuels():\n structure = fluid_structure(f)\n\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure:\n n[el] = structure[el]\n else:\n n[el] = 0\n\n lhv += (-(n['H'] / 2 * hf['H2O'] + n['C'] * hf['CO2'] -\n ((n['C'] + n['H'] / 4) * hf['O2'] +\n hf[list(fl)[0]])) / molar_masses[f] * 1000) * x\n\n return lhv",
"def get_hod(self):\n return self.hod_dict",
"def describe(self) -> Dict:\n\n desc = {\n \"Win\": {\n \"max\": np.max(self.Win),\n \"min\": np.min(self.Win),\n \"mean\": np.mean(self.Win),\n \"median\": np.median(self.Win),\n \"std\": np.std(self.Win)\n },\n \"W\": {\n \"max\": np.max(self.W),\n \"min\": np.min(self.W),\n \"mean\": np.mean(self.W),\n \"median\": np.median(self.W),\n \"std\": np.std(self.W),\n \"sr\": max(abs(linalg.eig(self.W)[0]))\n }\n }\n if self.Wfb is not None:\n desc[\"Wfb\"] = {\n \"max\": np.max(self.Wfb),\n \"min\": np.min(self.Wfb),\n \"mean\": np.mean(self.Wfb),\n \"median\": np.median(self.Wfb),\n \"std\": np.std(self.Wfb)\n }\n if self.Wout is not None:\n desc[\"Wout\"] = {\n \"max\": np.max(self.Wout),\n \"min\": np.min(self.Wout),\n \"mean\": np.mean(self.Wout),\n \"median\": np.median(self.Wout),\n \"std\": np.std(self.Wout)\n }\n return desc",
"def get_hcost(self):\n hvcost = self.get_hvcost()\n dcost = self.get_dcost()\n hcost = hvcost + dcost\n return hcost",
"def shield(self):\n capacity = self._getAttribute(Attribute.shieldCapacity)\n recharge = self._getAttribute(Attribute.shieldRecharge)\n em = self._getAttribute(Attribute.shieldEM)\n explosive = self._getAttribute(Attribute.shieldExplosive)\n kinetic = self._getAttribute(Attribute.shieldKinetic)\n thermal = self._getAttribute(Attribute.shieldThermal)\n\n recharge /= 1000 # milliseconds\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"recharge\": recharge,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }",
"def hdw(sounding, elevation=None):\n \n bottom = sounding.profile.elevation\n if elevation is not None and elevation > bottom:\n bottom = elevation\n top = bottom + 500.0\n \n # Find the station pressure for the surface adjusted temperature and dew point.\n bottom_p = sounding.surface.pres\n i = 0\n while bottom_p is None or sounding.profile.hgt[i] < bottom:\n bottom_p = sounding.profile.pressure[i]\n i += 1\n \n vals = zip(\n sounding.profile.hgt, sounding.profile.temp, sounding.profile.dewpoint,\n sounding.profile.windSpd, sounding.profile.pressure\n )\n\n vals = filter(lambda x_: x_[0] >= bottom, vals)\n vals = tuple(takewhile(lambda x: x[0] <= top, vals))\n \n # Filter out None values\n vpds = (\n (x[1], x[2], x[4])\n for x in vals\n if x[1] is not None and x[2] is not None and x[4] is not None\n )\n # Convert to potential temperature and specific humidity for reducing to the surface.\n vpds = ((wxf.theta_kelvin(x[2], x[0]), wxf.specific_humidity(x[1], x[2])) for x in vpds)\n # Finish surface adjustment.\n vpds = (\n (\n wxf.temperature_c_from_theta(x[0], bottom_p),\n wxf.dew_point_from_p_and_specific_humidity(bottom_p, x[1])\n ) for x in vpds\n )\n \n vpds = ((wxf.vapor_pressure_liquid_water(x[0]) - \\\n wxf.vapor_pressure_liquid_water(x[1])) for x in vpds)\n max_vpd = max(vpds)\n \n max_wspd = max(x[3] for x in vals if x[3] is not None)\n max_wspd = wxf.knots_to_mps(max_wspd)\n \n return max_vpd * max_wspd",
"def get_hull_attributes():\n hulls = {}\n hull_table = db_parser.get_table_as_dict('hull')\n hull_loadouts = Hull.get_hull_loadouts()\n for row in hull_table:\n # Make a new nested dictionary indexed by this hull's name\n hull_name = row['hull_name']\n hulls[hull_name] = {}\n for key in row.keys():\n if key == 'hull_name':\n pass\n else:\n hulls[hull_name][key] = row[key]\n # Now add this hull's loadout to its dictionary\n hulls[hull_name]['loadout'] = hull_loadouts[hull_name]\n return hulls",
"def _func(w):\r\n W = _adj(w)\r\n loss, G_loss = _loss(W)\r\n h, G_h = _h(W)\r\n obj = loss + 0.5 * rho * h * h + alpha * h + lambda1 * w.sum()\r\n G_smooth = G_loss + (rho * h + alpha) * G_h\r\n g_obj = np.concatenate((G_smooth + lambda1, - G_smooth + lambda1), axis=None)\r\n return obj, g_obj",
"def get_obj_desc():\n\n attributes = {'results' : ['aperiodic_params_', 'peak_params_',\n 'r_squared_', 'error_',\n '_gaussian_params'],\n 'settings' : ['peak_width_limits', 'max_n_peaks',\n 'min_peak_height', 'peak_threshold',\n 'aperiodic_mode'],\n 'data' : ['power_spectrum', 'freq_range', 'freq_res'],\n 'data_info' : ['freq_range', 'freq_res'],\n 'arrays' : ['freqs', 'power_spectrum', 'aperiodic_params_',\n 'peak_params_', '_gaussian_params'],\n 'model_components' : ['_spectrum_flat', '_spectrum_peak_rm',\n '_ap_fit', '_peak_fit']}\n\n return attributes",
"def get_ehp(self, damageProfile = None):\n if damageProfile is None:\n damageProfile = [1, 1, 1, 1]\n\n totalDamage = sum(damageProfile)\n emMultiplier = damageProfile[0] / totalDamage\n explosiveMultiplier = damageProfile[1] / totalDamage\n kineticMultiplier = damageProfile[2] / totalDamage\n thermalMultiplier = 1.0 - emMultiplier - explosiveMultiplier - kineticMultiplier\n\n multipliers = [emMultiplier, explosiveMultiplier, kineticMultiplier, thermalMultiplier]\n\n effectiveShield = self._ehp(self.shield, multipliers)\n effectiveArmor = self._ehp(self.armor, multipliers)\n effectiveHull = self._ehp(self.hull, multipliers)\n\n return {\n \"shield\": effectiveShield,\n \"armor\": effectiveArmor,\n \"hull\": effectiveHull\n }",
"def itkHistogramThresholdCalculatorHDUC_cast(obj: 'itkLightObject') -> \"itkHistogramThresholdCalculatorHDUC *\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUC_cast(obj)",
"def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h",
"def hull(self):\n capacity = self._getAttribute(Attribute.hullCapacity)\n em = self._getAttribute(Attribute.hullEM)\n explosive = self._getAttribute(Attribute.hullExplosive)\n kinetic = self._getAttribute(Attribute.hullKinetic)\n thermal = self._getAttribute(Attribute.hullThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }",
"def cast(obj: 'itkLightObject') -> \"itkHistogramThresholdCalculatorHDUS *\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUS_cast(obj)",
"def itkHistogramThresholdCalculatorHDUS_cast(obj: 'itkLightObject') -> \"itkHistogramThresholdCalculatorHDUS *\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUS_cast(obj)",
"def get_pfw_hdrupd(wcl):\n hdrupd = {}\n hdrupd['pipeline'] = \"%s/DESDM pipeline name/str\" % wcl.get('wrapper.pipeline')\n hdrupd['reqnum'] = \"%s/DESDM processing request number/int\" % wcl.get('reqnum')\n hdrupd['unitname'] = \"%s/DESDM processing unit name/str\" % wcl.get('unitname')\n hdrupd['attnum'] = \"%s/DESDM processing attempt number/int\" % wcl.get('attnum')\n hdrupd['eupsprod'] = \"%s/eups pipeline meta-package name/str\" % wcl.get('wrapper.pipeprod')\n hdrupd['eupsver'] = \"%s/eups pipeline meta-package version/str\" % wcl.get('wrapper.pipever')\n return hdrupd",
"def get_energy_effectiveness_ratio(self, obj):\n diesel_row = CreditCalculationService.get(\n category_id=obj.energy_effectiveness_ratio_category_id,\n effective_date=self.effective_date,\n fuel_class__fuel_class=\"Diesel\",\n model_name=\"EnergyEffectivenessRatio\"\n )\n\n gasoline_row = CreditCalculationService.get(\n category_id=obj.energy_effectiveness_ratio_category_id,\n effective_date=self.effective_date,\n fuel_class__fuel_class=\"Gasoline\",\n model_name=\"EnergyEffectivenessRatio\"\n )\n\n return {\n \"diesel\": diesel_row.ratio if diesel_row else None,\n \"gasoline\": gasoline_row.ratio if gasoline_row else None\n }",
"def _weigh_object(self, host_state, weight_properties):\n\n weight = 0.0\n if host_state.patch_prefer:\n weight += CONF.filter_scheduler.swmgmt_patch_weight_multiplier\n if host_state.upgrade_prefer:\n weight += CONF.filter_scheduler.swmgmt_upgrade_weight_multiplier\n return weight",
"def cast(obj: 'itkLightObject') -> \"itkHistogramThresholdCalculatorHDSS *\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDSS_cast(obj)",
"def perform_full_calc(dwelling):\n dwelling = perform_demand_calc(dwelling)\n dwelling.update(heating_systems_energy(dwelling))\n dwelling.update(appendix_m.pv(dwelling))\n dwelling.update(appendix_m.wind_turbines(dwelling))\n dwelling.update(appendix_m.hydro(dwelling))\n dwelling.update(appendix_c.chp(dwelling))\n\n dwelling.update(fuel_use(dwelling))\n\n return dwelling",
"def get_vdw_info(self):\n return",
"def itkHistogramThresholdCalculatorHDSS_cast(obj: 'itkLightObject') -> \"itkHistogramThresholdCalculatorHDSS *\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDSS_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkHistogramThresholdCalculatorHDUC *\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUC_cast(obj)",
"def get_dhw_state(self):\n url = (\n \"https://tccna.honeywell.com/WebAPI/emea/api/v1/\"\n \"domesticHotWater/%s/status?\" % self.dhwId\n )\n\n response = requests.get(\n url, headers=self.client._headers(), timeout=self.timeout\n )\n data = response.json()\n return data",
"def get_specific_heat() -> float:\n return 1006.0",
"def Hstep_cost_function(H): \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost",
"def device_state_attributes(self):\n # TODO: convert RH from Elk to AH ?\n #if self.current_humidity > 0:\n # humidity = self.current_humidity\n data = {\n 'hidden': self._hidden,\n 'temp_unit' : self.temperature_unit,\n }\n if self._device.temp_outside is not None and self._device.temp_outside > -460:\n data['temp_outside'] = self._device.temp_outside\n if self._device.temp_3 is not None and self._device.temp_3 > -460:\n data['temp_3'] = self._device.temp_3\n if self._device.temp_4 is not None and self._device.temp_4 > -460:\n data['temp_4'] = self._device.temp_4\n return data"
]
| [
"0.59208715",
"0.57487935",
"0.55831736",
"0.5531533",
"0.551731",
"0.54339695",
"0.53320193",
"0.5328312",
"0.530746",
"0.52977926",
"0.52972484",
"0.5286722",
"0.5277575",
"0.52104133",
"0.5186833",
"0.51818717",
"0.5173991",
"0.5159216",
"0.51519185",
"0.5130649",
"0.5092583",
"0.50783205",
"0.5076288",
"0.5052701",
"0.50396854",
"0.5036345",
"0.50308526",
"0.5029098",
"0.5007645",
"0.49964717"
]
| 0.720957 | 0 |
Calculate the SAP energy demand for a dwelling | def perform_demand_calc(dwelling):
# TODO: modify functions to take only the arguments they need instead of the whole dwelling data.
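    # Demand components: ventilation, heat loss, hot water use, lighting, internal and solar gains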
dwelling.update(ventilation(dwelling))
dwelling.update(heat_loss(dwelling))
dwelling.update(hot_water_use(dwelling))
dwelling.update(lighting_consumption(dwelling))
dwelling.update(internal_heat_gain(dwelling))
dwelling.update(solar(dwelling))
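    # Space heating requirement: heat losses balanced against the useful gains calculated above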
    # Need to copy the Q_required from the heat calc results to its own attribute for compatibility
dwelling.heat_calc_results = heating_requirement(dwelling)
dwelling.Q_required = dwelling.heat_calc_results['heat_required']
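    # Space cooling requirement and the output required from the water heater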
dwelling.Q_cooling_required = cooling_requirement(dwelling)
dwelling.output_from_water_heater = water_heater_output(dwelling)
return dwelling | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ComputeEnergyConsumption(self):\r\n pass",
"def perform_full_calc(dwelling):\n dwelling = perform_demand_calc(dwelling)\n dwelling.update(heating_systems_energy(dwelling))\n dwelling.update(appendix_m.pv(dwelling))\n dwelling.update(appendix_m.wind_turbines(dwelling))\n dwelling.update(appendix_m.hydro(dwelling))\n dwelling.update(appendix_c.chp(dwelling))\n\n dwelling.update(fuel_use(dwelling))\n\n return dwelling",
"def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand",
"def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp",
"def _calc_energy( self, V_a, eos_d ):\n pass",
"def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. / 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)",
"def get_demand(self):\n return self.df_demand",
"def calcRMSE(inflow, demand):\n ssd = 0\n for i in range(len(inflow)):\n ssd += pow((inflow[i]-demand[i]),2)\n rmse = np.sqrt(ssd/len(inflow))\n return rmse",
"def energy(data):\n return sum(pow(data, 2))",
"def get_annual_energy_demand(cfg):\n houses_dict = cfg['houses']\n houses_list = sorted(houses_dict.keys())\n\n # Calculate annual energy demand of houses\n # and store the result in the dict containing the house info\n for house_name in houses_list:\n house_type = houses_dict[house_name]['house_type']\n N_Pers = houses_dict[house_name].get('N_Pers', None)\n N_WE = houses_dict[house_name].get('N_WE', None)\n\n # Assign defaults if values are not defined\n if house_type == 'EFH' and pd.isna(N_Pers):\n N_Pers = 3\n houses_dict[house_name]['N_Pers'] = N_Pers\n logger.warning('N_Pers not defined for ' + str(house_name)\n + '. Using default ' + str(N_Pers))\n if house_type == 'MFH' and pd.isna(N_WE):\n N_WE = 2\n houses_dict[house_name]['N_WE'] = N_WE\n logger.warning('N_WE not defined for ' + str(house_name)\n + '. Using default ' + str(N_WE))\n\n # Implement the restrictions defined on page 3:\n if house_type == 'EFH' and N_Pers > 12:\n logger.warning('VDI 4655 is only defined for N_Pers <= 12. '\n + str(house_name) + ' uses N_Pers = ' + str(N_Pers)\n + '. Proceeding with your input...')\n if house_type == 'MFH' and N_WE > 40:\n logger.warning('VDI 4655 is only defined for N_WE <= 40. '\n + str(house_name) + ' uses N_WE = ' + str(N_WE)\n + '. Proceeding with your input...')\n\n # Calculate annual energy demand estimates\n if house_type == 'EFH':\n # (6.2.2) Calculate annual electrical energy demand of houses:\n if N_Pers < 3:\n W_a = N_Pers * 2000 # kWh\n elif N_Pers <= 6:\n W_a = N_Pers * 1750 # kWh\n else:\n W_a = N_Pers * 1500 # kWh\n\n # (6.2.3) Calculate annual DHW energy demand of houses:\n Q_TWW_a = N_Pers * 500 # kWh\n\n elif house_type == 'MFH':\n # (6.2.2) Calculate annual electrical energy demand of houses:\n W_a = N_WE * 3000 # kWh\n\n # (6.2.3) Calculate annual DHW energy demand of houses:\n Q_TWW_a = N_WE * 1000 # kWh\n\n else:\n # No house category given. Just use annual demand of 1 kWh\n W_a = 1\n Q_TWW_a = 1\n\n # If W_a and/or Q_TWW_a were already defined by the user in the yaml\n # file, we use those values instead of the calculated ones:\n W_a = houses_dict[house_name].get('W_a', W_a)\n Q_TWW_a = houses_dict[house_name].get('Q_TWW_a', Q_TWW_a)\n\n # Store the results in the dict\n houses_dict[house_name]['W_a'] = W_a\n houses_dict[house_name]['Q_TWW_a'] = Q_TWW_a\n\n # Assign defaults if values are not defined\n if houses_dict[house_name].get('Q_Heiz_a', None) is None:\n Q_Heiz_a = 1 # kWh\n houses_dict[house_name]['Q_Heiz_a'] = Q_Heiz_a\n logger.warning('Q_Heiz_a not defined for ' + house_name\n + '. Using default ' + str(Q_Heiz_a) + ' kWh')\n\n # Apply the adjustment factors\n houses_dict[house_name]['Q_Heiz_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_Q_Heiz', 1)\n\n houses_dict[house_name]['W_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_W', 1)\n\n houses_dict[house_name]['Q_TWW_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_Q_TWW', 1)\n\n return houses_dict",
"def test_energy_cost(self):\n rs = self.rate.get_rate_schedule(self.eir.api)\n\n i = pd.date_range(start = '2019-05-01', end='2019-06-30', freq='5min')\n s = pd.Series(data=0, index = i, dtype = np.float32)\n\n total = 10.0 * .1338\n total += 10.0 * .0969\n total += 10.0 * .1611\n total += 20.3 * 2\n s[pd.Timestamp('2019-05-01T18:00:00')] = 10.0\n s[pd.Timestamp('2019-05-01T06:00:00')] = 10.0\n s[pd.Timestamp('2019-06-05T15:00:00')] = 10.0\n\n df = rs.get_costs(s)\n\n print(df.head())",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))",
"def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)",
"def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. 
Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol",
"def get_energy_demand_values_day(weather_data, houses_list, houses_dict,\n energy_factor_types, energy_demands_types,\n load_curve_houses, load_profile_df,\n daily_energy_demand_houses):\n start = weather_data.index[0]\n while start < weather_data.index[-1]:\n end = start + pd.Timedelta('1 days')\n if logger.isEnabledFor(logging.INFO):\n print('\\rProgress: '+str(start), end='\\r') # print progress\n typtag = weather_data.loc[start]['typtag']\n for house_name in houses_list:\n house_type = houses_dict[house_name]['house_type']\n for i, energy_factor_type in enumerate(energy_factor_types):\n energy_demand_type = energy_demands_types[i]\n # Example: Q_Heiz_TT(t) = F_Heiz_TT(t) * Q_Heiz_TT\n load_curve_houses.loc[start:end, (house_name,\n energy_demand_type)] =\\\n load_profile_df.loc[start:end, (energy_factor_type,\n house_type)] *\\\n daily_energy_demand_houses.loc[(house_name,\n energy_demand_type), typtag]\n# print(load_curve_houses.loc[start:end])\n start = end\n\n if logger.isEnabledFor(logging.INFO):\n # overwrite last status with empty line\n print('\\r', end='\\r')\n\n return load_curve_houses",
"def energy_yield(self):\n return self['kwh_per_kw']",
"def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] 
= self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary energy\").sum(dim=\"second\") / distance\n ).T",
"def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings",
"def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def calc_demand(self, routing: np.ndarray, demand: float,\n commodity_idx: int) -> np.ndarray:\n commodity = self.commodities[commodity_idx]\n node_flow = np.zeros(self.num_nodes)\n node_flow[commodity[0]] = demand\n\n split_matrix = np.zeros((self.num_nodes, self.num_nodes), dtype=float)\n for edge_idx, edge in enumerate(self.edges):\n split_matrix[edge[1]][edge[0]] = routing[commodity_idx][edge_idx]\n split_matrix[:, commodity[1]] = 0 # no send from the destination node\n\n edge_utilisation = np.zeros((self.num_nodes, self.num_nodes))\n\n num_steps = 0\n while True:\n change = np.multiply(split_matrix, node_flow)\n edge_utilisation += change\n node_flow = np.matmul(split_matrix, node_flow)\n if np.any(np.isnan(change)):\n print(\"is_nan :'(\")\n comparison = np.less(np.nan_to_num(change), self.min_delta)\n if np.logical_and.reduce(np.logical_and.reduce(comparison)):\n break\n num_steps += 1\n # if we take more than |E| steps we have cycles which is not good.\n # Therefore: end here with really bad reward, scaled by number of\n # cycles\n if num_steps > routing.shape[1]:\n remaining_flow = np.greater(np.nan_to_num(change), 0.0)\n edge_utilisation += np.multiply(remaining_flow, np.full(\n (self.num_nodes, self.num_nodes), demand))\n break\n\n return edge_utilisation",
"def calc_excess_energy (self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy",
"def analytical_power(self, proc, vdd, temp, load):\n from tech import spice\n leakage = spice[\"bitcell_leakage\"]\n dynamic = 0 #temporary\n total_power = self.return_power(dynamic, leakage)\n return total_power",
"def get_product_flow():\n return sum([i.get_total_flow('ton/day') for i in LAOs.products]) * LAOs.LAOs_tea.operating_days",
"def energy(energy_name: str) -> float:\n pass",
"def curDemand(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, coff):\n\t# price dependent\n\tpdm = curPrice - priceMin\n\tpdp = curPrice - prevPrice\n\tq = demandIntcpt + k1 * pdm + k2 * pdm * pdm - a * shock(plus(pdp)) + b * shock(minus(pdp)) \n\t\n\t# cyclic \n\tq += demCycAmp * math.sin(2.0 * math.pi * coff / demCycPer)\n\t\n\t# random\n\tq += np.random.normal(0, randDemSd)\n\t\n\tq = plus(q)\n\treturn q",
"def get_hoomd_energy(key, hoomd_force_groups, calc_group):\n\n total_energy = 0\n total_energy = sum([a.get_energy(calc_group) \n for a in hoomd_force_groups[key]])\n return total_energy",
"def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))",
"def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings",
"def get_total_energy_produced (self):\n return self.net_generation_wind"
]
| [
"0.6600343",
"0.64118046",
"0.6172422",
"0.6082626",
"0.60701555",
"0.5959659",
"0.5950571",
"0.58915526",
"0.5886798",
"0.5876974",
"0.5857146",
"0.5847542",
"0.5726186",
"0.5711322",
"0.56956786",
"0.5695321",
"0.5692664",
"0.56833786",
"0.56779873",
"0.5672595",
"0.56654024",
"0.56469846",
"0.5636373",
"0.56312066",
"0.56295717",
"0.56234753",
"0.56173295",
"0.55997694",
"0.5594759",
"0.55441463"
]
| 0.68815106 | 0 |
Perform a full SAP worksheet calculation on a dwelling, adding the results to the dwelling provided. This performs a demand calculation and a renewable energy calculation | def perform_full_calc(dwelling):
dwelling = perform_demand_calc(dwelling)
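    # Energy used by the heating systems to meet the calculated demand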
dwelling.update(heating_systems_energy(dwelling))
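    # On-site generation (appendix_m): photovoltaics, wind turbines and hydro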
dwelling.update(appendix_m.pv(dwelling))
dwelling.update(appendix_m.wind_turbines(dwelling))
dwelling.update(appendix_m.hydro(dwelling))
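    # Combined heat and power contribution (appendix_c)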
dwelling.update(appendix_c.chp(dwelling))
dwelling.update(fuel_use(dwelling))
return dwelling | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_demand_calc(dwelling):\n\n # TODO: modify functions to take only the arguments they need instead of the whole dwelling data.\n dwelling.update(ventilation(dwelling))\n\n dwelling.update(heat_loss(dwelling))\n\n dwelling.update(hot_water_use(dwelling))\n\n dwelling.update(lighting_consumption(dwelling))\n\n dwelling.update(internal_heat_gain(dwelling))\n\n dwelling.update(solar(dwelling))\n\n # Need to copy the Q_required from the heat calc results to it's own attribute for compatibility\n dwelling.heat_calc_results = heating_requirement(dwelling)\n dwelling.Q_required = dwelling.heat_calc_results['heat_required']\n\n dwelling.Q_cooling_required = cooling_requirement(dwelling)\n\n dwelling.output_from_water_heater = water_heater_output(dwelling)\n\n return dwelling",
"def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. 
Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol",
"def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings",
"def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings",
"def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. / 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)",
"def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Three as Three\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet3 as Generate\n import wa.Functions.Start.Get_Dictionaries as GD\n \n ######################### Set General Parameters ##############################\n\n # Check if there is a full year selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced \n try:\n years_end = pd.date_range(Startdate,Enddate,freq=\"A\").year\n years_start = pd.date_range(Startdate,Enddate,freq=\"AS\").year\n if (len(years_start) == 0 or len(years_end) == 0):\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n years = np.unique(np.append(years_end,years_start))\n except:\n print \"Calculation period is less than a year, which is not possible for sheet 3\"\n quit\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \t\n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n\n #Set Startdate and Enddate for moving average\n ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0') \n Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())\n Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)\n Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = 0)\n Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)\n Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)\n\n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String, P_Product, Daily = 'n') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_Moving_Average_String)\n Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate)\n \n if 
NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n\n # Create monthly GPP\n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n # Create monthly NDVI based on MOD13\n if NDVI_Product == 'MOD13':\n Dir_path_NDVI = os.path.join(Dir_Basin, Data_Path_NDVI)\n Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Dir_path_NDVI, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n DataCube_LU[DataCube_LU<0] = np.nan\n\n Name_NC_LU = DC.Create_NC_name('LU', Simulation, Dir_Basin, 3)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n #_______________________________Evaporation________________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #____________________________________NDVI__________________________________\n\n info = ['monthly','-', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n\n Name_NC_NDVI = DC.Create_NC_name('NDVI', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDVI):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDVI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDVI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDVI, DataCube_NDVI, 'NDVI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_NDVI\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n 
DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_Prec\n\n #________________________Reference Evaporation______________________________\n\n # Reference Evapotranspiration data\n Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_ETref):\n\n # Get the data of Evaporation and save as nc\n DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate_Moving_Average_String, Enddate_Moving_Average_String, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', Example_dataset, Startdate_Moving_Average_String, Enddate_Moving_Average_String, 'monthly', 0.01)\n del DataCube_ETref\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 3, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n ############################# Calculate Sheet 3 ###########################\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate_Moving_Average_String[5:7], Startdate_Moving_Average_String[0:4]]) , ''.join([Enddate_Moving_Average_String[5:7], Enddate_Moving_Average_String[0:4]])]\n\n #____________ Evapotranspiration data split in ETblue and ETgreen ____________\n\n Name_NC_ETgreen = DC.Create_NC_name('ETgreen', Simulation, Dir_Basin, 3, info)\n Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 3, info)\n \n if not (os.path.exists(Name_NC_ETgreen) or os.path.exists(Name_NC_ETblue)):\n\n # Calculate Blue and Green ET\n DataCube_ETblue, DataCube_ETgreen = Three.SplitET.Blue_Green(Startdate, Enddate, Name_NC_LU, Name_NC_ETref, Name_NC_ET, Name_NC_P)\n\n # Save the ETblue and ETgreen data as NetCDF files\n DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue, 'ETblue', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n DC.Save_as_NC(Name_NC_ETgreen, DataCube_ETgreen, 'ETgreen', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n\n del DataCube_ETblue, DataCube_ETgreen\n \n #____________________________ Create the empty dictionaries ____________________________\n \n # Create the dictionaries that are required for sheet 3 \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()\n \n #____________________________________ Fill in the dictionaries ________________________\n\n # Fill in the crops dictionaries \n wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, Name_NC_LU, Name_NC_ETgreen, Name_NC_ETblue, Name_NC_NDM, Name_NC_P, Dir_Basin)\n\n # Fill in the non crops dictionaries \n wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)\n\n for year in 
years:\n\n ############################ Create CSV 3 ################################# \n \n csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)\n\n ############################ Create Sheet 3 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)\n \n return()",
"def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings",
"def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, Startdate, Enddate, Simulation): \n ######################### Import WA modules ###################################\n \n from wa.General import raster_conversions as RC\n from wa.General import data_conversions as DC\n import wa.Functions.Two as Two\n import wa.Functions.Start as Start\n import wa.Generator.Sheet2 as Generate\n \n ######################### Set General Parameters ##############################\n\n # Get environmental variable for the Home folder\n if WA_HOME_folder == '':\n WA_env_paths = os.environ[\"WA_HOME\"].split(';')\n Dir_Home = WA_env_paths[0]\n else:\n Dir_Home = WA_HOME_folder\n \n # Create the Basin folder\n Dir_Basin = os.path.join(Dir_Home, Basin)\n if not os.path.exists(Dir_Basin):\n os.makedirs(Dir_Basin)\t\n\n # Get the boundaries of the basin based on the shapefile of the watershed\n # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)\n Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)\n \n ############################# Download Data ###################################\n\n # Set the NPP and GPP data for the whole year\n StartYear = Startdate[:4]\n EndYear = Enddate[:4]\n StartdateNDM = '%d-01-01' %int(StartYear)\n EnddateNDM = '%d-12-31' %int(EndYear)\n \n # Download data\n Data_Path_P = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, P_Product, Daily = 'y') \n Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, ET_Product)\n Data_Path_LAI = Start.Download_Data.LAI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate, Enddate, LAI_Product) \n \n if NDM_Product == 'MOD17':\n Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], StartdateNDM, EnddateNDM, NDM_Product) \n\n Data_Path_P_Daily = os.path.join(Data_Path_P, 'Daily')\n Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')\n \n ########################### Create input data #################################\n\n # Create Rainy Days based on daily CHIRPS\n Data_Path_RD = Two.Rainy_Days.Calc_Rainy_Days(Dir_Basin, Data_Path_P_Daily, Startdate, Enddate)\n\n # Create monthly LAI\n Dir_path_LAI = os.path.join(Dir_Basin, Data_Path_LAI)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_LAI, Startdate, Enddate)\n\n # Create NDM based on MOD17\n if NDM_Product == 'MOD17':\n \n # Create monthly GPP \n Dir_path_GPP = os.path.join(Dir_Basin, Data_Path_GPP)\n Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Dir_path_GPP, StartdateNDM, EnddateNDM)\n Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate)\n\n ###################### Save Data as netCDF files ##############################\n \n #___________________________________Land Use_______________________________\n\n # Get the data of LU and save as nc, This dataset is also used as reference for others\n LUdest = gdal.Open(Example_dataset) \n DataCube_LU = LUdest.GetRasterBand(1).ReadAsArray()\n\n Name_NC_LU = 
DC.Create_NC_name('LU', Simulation, Dir_Basin, 2)\n if not os.path.exists(Name_NC_LU):\n DC.Save_as_NC(Name_NC_LU, DataCube_LU, 'LU', Example_dataset)\n\n LUdest = None\n del DataCube_LU\n\n #______________________________Precipitation_______________________________\n\n # Define info for the nc files\n info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n # Precipitation data\n Name_NC_P = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_P):\n\t\n # Get the data of Precipitation and save as nc\n DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_P, DataCube_Prec, 'Prec', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_Prec\n\n #_______________________________Evaporation________________________________\n\n # Evapotranspiration data\n Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_ET):\n\n # Get the data of Evaporation and save as nc\n DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', Example_dataset, Startdate, Enddate, 'monthly', 0.01)\n del DataCube_ET\n\n #___________________________Normalized Dry Matter__________________________\n\n # Define info for the nc files\n info = ['monthly','kg_ha-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_NDM = DC.Create_NC_name('NDM', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_NDM):\n\n # Get the data of Evaporation and save as nc\n DataCube_NDM = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_NDM, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_NDM, DataCube_NDM, 'NDM', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_NDM\n\n #_______________________________Rainy Days_________________________________\n\n # Define info for the nc files\n info = ['monthly','days', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_RD = DC.Create_NC_name('RD', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_RD):\n\n # Get the data of Evaporation and save as nc\n DataCube_RD = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_RD, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_RD, DataCube_RD, 'RD', Example_dataset, Startdate, Enddate, 'monthly', 100)\n del DataCube_RD\n\n #_______________________________Leaf Area Index____________________________\n\n # Define info for the nc files\n info = ['monthly','m2-m-2', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]\n\n Name_NC_LAI = DC.Create_NC_name('LAI', Simulation, Dir_Basin, 2, info)\n if not os.path.exists(Name_NC_LAI):\n\n # Get the data of Evaporation and save as nc\n DataCube_LAI = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_LAI, Startdate, Enddate, Example_data = Example_dataset)\n DC.Save_as_NC(Name_NC_LAI, DataCube_LAI, 'LAI', Example_dataset, Startdate, Enddate, 'monthly', 1)\n del DataCube_LAI\n\n ####################### Calculations Sheet 2 ##############################\n \n DataCube_I, DataCube_T, DataCube_E = Two.SplitET.ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM, Name_NC_LU, Startdate, Enddate, Simulation)\n \n 
############################ Create CSV 2 ################################# \n\n Dir_Basin_CSV = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU, DataCube_I, DataCube_T, DataCube_E, Example_dataset)\n\n ############################ Create Sheet 2 ############################### \n\n Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV)\n\n return()",
"def solve_stationary_equilibrium(self) :\n \n \n \n \n #a. find the equilibrium wage given the tax rate and subsidy\n w_ss = self.find_equilibrium_wage(self.w0_guess, self.w1_guess)\n \n #b. obtain firm policy functions and discount present value factors\n W_e , pol_k, pol_n, pi, W, pol_enter = self.entry_condition(w_ss)\n \n \n #c. obtain the invariant distribution \n \n #i. normalized invariant distribution over firms\n mu_hat = pol_enter/self.lambdaa * self.joint_pdf\n \n #ii. labor market clearing (section 3.5), agg demand for labor\n N_ss = np.sum(np.sum(pol_n*mu_hat, axis=0))\n \n #iii. ss equilibrium level of entry (mass of entrants)\n E_star = 1/N_ss \n \n #iv. rescale invariant distribution over firms, mu(s,tau)\n mu = E_star*mu_hat\n \n #d. marginal distributions\n \n #i. sum over subsidies, except, taxes of stationary distribution\n distrib_stationary = np.sum(mu, axis=1)\n total_mass = np.sum(distrib_stationary)\n \n #ii. marginal stationary distribution over productivity\n pdf_stationary = distrib_stationary / total_mass\n cdf_stationary = np.cumsum(pdf_stationary)\n \n #iii. stationary distribution over number of employed \n distrib_emp = (pol_n[:,2] * pdf_stationary)/ np.sum(pol_n[:,2] * pdf_stationary)\n pdf_emp = distrib_emp / np.sum(distrib_emp)\n cdf_emp = np.cumsum(pdf_emp)\n \n #e. Aggregate statistics\n \n Y_ss = np.sum(np.sum( self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma*mu, axis=0)) #ss output\n K_ss = np.sum(np.sum(pol_k*mu, axis=0)) #ss capital\n TFP_ss = Y_ss/(N_ss*E_star)/(K_ss/(N_ss*E_star))**self.alpha\n total_employment = np.dot(self.labor_demand_rel, distrib_stationary)\n average_firm_size = total_employment / total_mass\n \n #output share of subsidy, excemption, taxed\n Y_set = np.sum(self.grid_s_matrix * pol_k**self.alpha*pol_n**self.gamma*mu, axis=0) / Y_ss\n \n Y_sub_percent = Y_set[0] #output share of establishments that are receiving a subsidy, Y_s/Y\n Y_exempt_percent = Y_set[1]\n Y_taxed__Percent = Y_set[2]\n \n #the total subsidies paid out to establishments receiving subsidies as a fraction of output. numerator takes first column which is subsidy (S/Y)\n subsidy_size = np.sum(-self.tau_output[:,0]*self.grid_s_matrix[:,0]*pol_k[:,0]**self.alpha \\\n *pol_n[:,0]**self.gamma*mu[:,0]-self.tau_capital[:,0]*self.ret \\\n *pol_k[:,0]*mu[:,0]-self.tau_labor[:,0]*w_ss* \\\n pol_n[:,0]*mu[:,0]) / Y_ss\n \n \n return Y_ss, K_ss, TFP_ss, average_firm_size, E_star, Y_set, subsidy_size, N_ss, w_ss, cdf_stationary, cdf_emp",
"def payroll_calculation():\n\n name = search_employee()\n if name == None:\n return\n accrual_month = month('Accrual month: ')\n accrual_year = year('Accrual year: ')\n accrual = f'{accrual_month}-{accrual_year}'\n salary_value = month_salary()\n salary_base = salary(salary_value)\n overtime = value_input('Overtime: ')\n absences = value_input('Absences: ')\n late = value_input('Late: ')\n bonus = value_input('Bonus: ')\n\n hourly_wage = round(salary_value / 220, 2)\n overtime_value = round(float(hourly_wage * 1.5), 2)\n overtime_total = round(overtime_value * overtime, 2)\n daily_wage = round(salary_value / 30, 2)\n absences_value = round(daily_wage * absences, 2)\n late_value = round(daily_wage * late / 60, 2)\n inss_value = inss(salary_base, overtime_total)\n irrf_value = irrf(salary_base, overtime_total, inss_value, bonus)\n sleep(2)\n\n\n\n header('EARNINGS')\n print(f'Salary: {salary_base}')\n print(f'Bonus: {bonus}')\n print(f'Overtime: {overtime_total }')\n earnings_total = round(salary_base + overtime_total + bonus, 2)\n sleep(2)\n\n print(line())\n print(f'Earnings total: {earnings_total}')\n print(line())\n sleep(2)\n\n header('DISCOUNTS')\n\n transportation_vouchers = round(salary_base * 6 / 100, 2)\n health_care = round(salary_base * 2 / 100, 2)\n dental_care = round(salary_base * 0.5 / 100, 2)\n meal_ticket = round(salary_base * 1 / 100, 2)\n\n print(f'absences: {absences_value}')\n print(f'late: {late_value}')\n print(f'transportation_vouchers: {transportation_vouchers}')\n print(f'health_care: {health_care}')\n print(f'dental_care: {dental_care}')\n print(f'meal_ticket: {meal_ticket}')\n print(f'inss_value: {inss_value}')\n print(f'irrf_value: {irrf_value}')\n\n discounts_total = round(absences_value + late_value + transportation_vouchers + health_care +\n dental_care + meal_ticket + inss_value + irrf_value, 2)\n\n print(line())\n print(f'Discounts_total : {discounts_total }')\n print(line())\n liquid_salary = round(earnings_total - discounts_total, 2)\n print(f'Liquid_salary: {liquid_salary} ')\n print(line())\n\n conn = sqlite3.connect('data/people_management.db')\n cursor = conn.cursor()\n cursor.execute(f\"\"\"\n INSERT INTO salary (name, salary ,bonus, overtime, absences_value, late_value, \n t_vouchers, health_care, dental_care, meal_ticket, inss, irrf, \n earnings, discounts, liquid_salary, accrual)\n VALUES ('{name}', '{salary_base}' ,'{bonus}', '{overtime_total}', '{absences_value}', \n '{late_value}', '{transportation_vouchers}', '{health_care}', '{dental_care}', \n '{meal_ticket}', '{inss_value}', '{irrf_value}', '{earnings_total}', '{discounts_total}', \n '{liquid_salary}', '{accrual}')\n \"\"\")\n conn.commit()\n conn.close()",
"def main():\n data = get_sales_data()\n sales_data = [int(num) for num in data]\n update_worksheet(sales_data, 'sales')\n new_surplus_data = calculate_surplus_sandwiches(sales_data)\n update_worksheet(new_surplus_data, 'surplus')\n list_of_last_five_sales = get_last_five_sales_entries()\n stock_data = get_average_sales(list_of_last_five_sales)\n update_worksheet(stock_data, 'stock')\n return stock_data",
"def wwhr_savings(dwelling):\n # TODO: Variables were defined but not used\n # savings = 0\n # Nshower_with_bath = 1\n # Nshower_without_bath = 0\n Nshower_and_bath = dwelling.wwhr_total_rooms_with_shower_or_bath\n\n S_sum = 0\n for sys in dwelling.wwhr_systems:\n effy = sys['pcdf_sys']['effy_mixer_shower'] / 100\n util = sys['pcdf_sys']['utilisation_mixer_shower']\n S_sum += (sys['Nshowers_with_bath'] * .635 * effy *\n util + sys['Nshowers_without_bath'] * effy * util)\n\n Seff = S_sum / Nshower_and_bath\n Tcoldm = numpy.array(\n [11.1, 10.8, 11.8, 14.7, 16.1, 18.2, 21.3, 19.2, 18.8, 16.3, 13.3, 11.8])\n Awm = .33 * 25 * MONTHLY_HOT_WATER_TEMPERATURE_RISE / (41 - Tcoldm) + 26.1\n Bwm = .33 * 36 * MONTHLY_HOT_WATER_TEMPERATURE_RISE / (41 - Tcoldm)\n\n savings = (dwelling.Nocc * Awm + Bwm) * Seff * (35 - Tcoldm) * \\\n 4.18 * DAYS_PER_MONTH * MONTHLY_HOT_WATER_FACTORS / 3600.\n\n return savings",
"def refresh(self):\n lastDate = max(etf.data.index[-1] for etf in self.etfs.values())\n for etf in self.etfs.values():\n isLastDayMissing = etf.data.index[-1] < lastDate\n if isLastDayMissing and not etf.sold():\n lastDay = pd.DataFrame([etf.data.iloc[-1]], columns=etf.data.columns, index=[lastDate])\n etf.data = etf.data.append(lastDay)\n etf.calculateStats()\n # Get Profit/Loss series\n p_l = pd.DataFrame()\n for name, etf in self.etfs.items():\n p_l[name] = etf.data['Profit/Loss']\n p_l.fillna(method='ffill', inplace=True)\n self.data['Profit/Loss'] = p_l.sum(axis=1)\n\n # Get Invested amount seires\n inv = pd.DataFrame()\n for name, etf in self.etfs.items():\n inv[name] = etf.data['Invested']\n if etf.sold():\n inv.loc[etf.sell_date:,name] = -etf.profit_loss()\n inv.fillna(method='ffill', inplace=True)\n self.data['Invested'] = inv.sum(axis=1)\n\n self.data['Profit/Loss%'] = self.data['Profit/Loss'] / self.data['Invested'] * 100 # Calculates the Profit/Loss (%)\n self.data['Value'] = round(self.data['Invested'] + self.data['Profit/Loss'], 2)\n self.data['Gains'] = self.data['Profit/Loss'] - self.data['Profit/Loss'].shift(1)\n self.data['Gains%'] = self.data['Gains'] / self.data['Value'].shift(1) * 100",
"def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp",
"def incumbant_firm(self, wage):\n \n \n \n # a. demand for capital (capital policy function)\n pol_k = (self.alpha /(self.ret *(1+self.tau_capital)))**((1-self.gamma)/(1-self.gamma-self.alpha)) \\\n * (self.gamma /(wage * (1+self.tau_labor)))**(self.gamma/(1-self.gamma-self.alpha)) \\\n * (self.grid_s_matrix*(1-self.tau_output))**(1/(1-self.alpha-self.gamma))\n \n # b. demand of labor (labor policy function)\n pol_n = (1+self.tau_capital) * self.ret * self.gamma / ((1+self.tau_labor) * wage * self.alpha) * pol_k\n #pol_n = ((smatrix*(1-self.tau_output) * gamma) / wage)**(1/(1-gamma)) * pol_k**(alpha/(1-gamma))\n \n # c. incumbant profit\n pi=(1-self.tau_output) * self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma \\\n - (1+self.tau_labor)* wage * pol_n - (1+self.tau_capital) * self.ret * pol_k - self.cf\n \n # d. discounted present value of an incumbent establishment, W(s,pol_k(s,theta))\n W = pi / (1-self.rho)\n \n return pol_k, pol_n, pi, W",
"def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")",
"def apply_dw(self, dw):\n\n # list of trainable params\n param_names = [\"w_out\", \"b_out\", \"w_link\", \"w_in\", \"b_in\"]\n\n for param_name in param_names:\n self.__dict__[param_name] = self.__getattribute__(\n param_name) - LEARNING_RATE*dw[\"d\" + param_name]",
"def calculate(self):",
"def testAggregateCorrectly_simple(self):\n\n\tscaler = pf.LinearScaler()\n\tQBp = pf.ProductQuoteBasis(base_price = 1.53, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(100, '1/gal'))\n\tesc = pf.NoEscalationEscalator()\n\tpr1 = pf.Product(name = 'gasoline', description = 'People', quote_basis = QBp, escalator = esc)\n\tpro1 = pf.Production(name = 'stream1', product = pr1, rate = uv.UnitVal(15000, 'gal/hr'), startup_discounter = None, init_date = dt.datetime(2015,01,01))\n\n\tQB = pf.VariableExpenseQuoteBasis(base_price = 0.062, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(100, '1/(kW*hr)'))\n\tvex1 = pf.VariableExpense(name = 'Electricity', description = 'Power consumption by plant', quote_basis = QB, production = pro1, rate = uv.UnitVal(1, 'kW*hr/gal'), escalator = esc)\n\tend_date = dt.datetime(2034,12,31)\n\tvex1.preferred_units['variable_consumption'] = 'kW*hr'\t\n\tvex1.build_vex_schedule(end_date)\t\t#Do we need a term here? Yes? How do we control this...through the production? #maybe this should just be passed as an argument\n\n\tdates = [dt.datetime(2015,01,31), dt.datetime(2016,01,31), dt.datetime(2020, 03, 31), dt.datetime(2021, 12,31)]\n ###!!!###Still need values here!\t--should really test escalation here to be sure\n vals_cost = [22320,22320,22320,22320]\n vals_cons = [360000,360000,360000,360000]\n \t\n for d, v1, v2 in zip(dates, vals_cons, vals_cost):\n self.assertAlmostEqual(v1, vex1.schedule.loc[d, 'variable_consumption'],4)\n\t self.assertAlmostEqual(v2, vex1.schedule.loc[d, 'variable_costs'],4)",
"def calculate_profit(self):",
"def ComputeEnergyConsumption(self):\r\n pass",
"def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] 
= self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary energy\").sum(dim=\"second\") / distance\n ).T",
"def calculate():\n\n # Get all input data from the GUI\n age = float(age_input.get())\n weight = float(weight_input.get())\n height = float(height_input.get())\n heartrate = float(heartrate_input.get())\n duration = float(duration_input.get())\n\n if gender.get() == 0:\n # Calculate data for males\n bmr = male_bmr(weight, height, age)\n gross_calories = male_calories(heartrate, weight, age, duration)\n else:\n # Calculate data for females\n bmr = female_bmr(weight, height, age)\n gross_calories = female_calories(heartrate, weight, age, duration)\n\n net_calories = gross_calories - (bmr / 1440 * duration)\n\n # Display calculated data\n bmr_output.config(text=int(bmr))\n gross_output.config(text=int(gross_calories))\n net_output.config(text=int(net_calories))",
"def _calculate(self):\n raise NotYetImplemented()",
"def calculate(self, waste_heat=0):\n radiator_type = self.radiators_type.get()\n data = self.radiators[radiator_type]\n if not waste_heat:\n for _, subsystem in self.data.wasteheat.items():\n waste_heat += subsystem\n self.waste_heat.set(waste_heat)\n area = waste_heat / (data[\"Specific area heat\"] * 1000)\n mass = (area * 1000) * data[\"Specific area mass\"]\n self.data.masses[\"Lifesupport Radiators\"] = mass\n self.area.set(area)\n self.mass.set(mass)\n self.radiator_temperature.set(self.radiators[radiator_type][\"Radiator Temperature\"])",
"def calc_annual_heating_savings (self):\n price = self.diesel_prices + self.cd['heating fuel premium']\n maintenance = self.comp_specs['heat recovery o&m']\n self.annual_heating_savings = -1 * \\\n (maintenance + (self.lost_heat_recovery * price))",
"def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)",
"def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh",
"def calculate(self):\r\n\r\n pass",
"def total_investment_costs(dh: DataHandler):\n # discountrate series with index: scenarios\n discount_rate = dh.get(\"scalars\").loc[\"discountrate\", :]\n sc = 8760 / dh.scenarios.hours\n\n scen_hor_map = dh.scenarios.horizon\n\n # investment costs dataframe with columns: scenarios and index: alltec\n inv = dh.get(\"i_cost\").xs(\"invest\", level=\"par_cost\")\n assert all(\n k in scen_hor_map for k in inv.columns\n ), \"You have not defined a horizon level for a scenario.\"\n tec_inv = list(\n dh.get(\"i_cost\")\n .xs(\"invest\", level=\"par_cost\")\n .index.get_level_values(\"alltec\")\n .unique()\n )\n inv = inv.groupby([\"alltec\"]).apply(extract_horizon_specific_cost, scen_hor_map)\n\n # lifetime dataframe with columns: scenarios and index: alltec\n\n lt = dh.get(\"i_cost\").xs(\"lifetime\", level=\"par_cost\")\n lt.index = lt.index.droplevel(\"i_cost\")\n lt = lt.loc[tec_inv, :]\n\n # flex_premium dataframe with columns: scenarios and index: alltec\n fp = dh.get(\"i_cost\").xs(\"flex_premium\", level=\"par_cost\")\n fp.index = fp.index.droplevel(\"i_cost\")\n fp = fp.loc[tec_inv, :]\n\n inv = (\n inv\n * ((1 + discount_rate) ** lt * discount_rate)\n / ((1 + discount_rate) ** lt - 1)\n )\n\n # investment costs DataFrame with columns: scenarios and index: [alltec, regions]\n cost = inv / sc * fp\n cost = add_dimension(cost, dh.merge_stored_sets(\"r\"), \"r\")\n cost = cost.reorder_levels([\"alltec\", \"r\"])\n\n inv_capa = dh.get(\"o_inve\")\n inv_capa.index.names = change_tec_lvl_name_to_alltec(inv_capa.index.names)\n inv_capa.index = inv_capa.index.droplevel([\"new\"])\n inv_capa = inv_capa.astype(\"Float64\")\n\n return inv_capa.mul(cost)"
]
| [
"0.739033",
"0.60644275",
"0.5957915",
"0.5946374",
"0.58137023",
"0.5806559",
"0.5777093",
"0.5729225",
"0.57091135",
"0.56209016",
"0.5604566",
"0.5585306",
"0.54862684",
"0.5486228",
"0.54801905",
"0.54555106",
"0.5446662",
"0.53991157",
"0.5393783",
"0.5389663",
"0.5348167",
"0.53479433",
"0.5331273",
"0.5325056",
"0.53082365",
"0.53070056",
"0.52635217",
"0.5258982",
"0.52560824",
"0.5253872"
]
| 0.7647427 | 0 |
This function computes the surface area | def compute_surface_area(self):
return np.sum(self._find_triangle_areas()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2",
"def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea",
"def getSurfaceArea(self) -> float:\n return self.area()",
"def surface_area(self):\n return self._surface_area",
"def rectangle_surface_area(a,b):\n return (a*b)",
"def square_surface_area(a):\n return (a*a)",
"def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r",
"def surface_area(DEM, resolution):\n\n resolution_squared = resolution ** 2.\n cross_distance_squared = 2.0 * (resolution ** 2.)\n\n m1 = ((DEM[0:-1, 0:-1] - DEM[0:-1, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m2 = ((DEM[0:-1, 0:-1] - DEM[1:, 0:-1]) ** 2.0 + resolution_squared) ** 0.5\n m3 = ((DEM[0:-1, 0:-1] - DEM[1:, 1:]) ** 2.0 + cross_distance_squared) ** 0.5\n m4 = ((DEM[0:-1, 1:] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m5 = ((DEM[1:, 0:-1] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n\n #from pdb import set_trace; set_trace()\n # Heron's formula for computing the area of a triangle, knowing 3 sides lengths,\n # requires a semiperimeter variable \"s\"\n s1 = 0.5 * (m3 + m5 + m2)\n s2 = 0.5 * (m3 + m4 + m1)\n\n # Calculate area using Heron's formula. This computes the upper and lower triangle area for each set of 4 dem points\n area = np.sum(np.sqrt(s1 * (s1 - m3) * (s1 - m5) * (s1 - m2))) + np.sum(np.sqrt(s2 * (s2 - m3) * (s2 - m4) * (s2 - m1)))\n\n return area",
"def compute_mesh_area(mesh):\n vertices = mesh.vertices\n faces = mesh.faces\n areas = [compute_triangle_area(vertices[face]) for face in faces]\n mesh_surface_area = sum(areas)\n return mesh_surface_area",
"def calc_surface_area(faces, verts):\n # Calculate the surface area of a mesh from it's triangle faces.\n # faces: List of all the faces on the surface. Each face indexes three\n # points from verts which make up the triangle face.\n # verts: List of all the vertices on the surface.\n area = 0\n for face in faces:\n # Extract x's and y's from the face's vertices.\n xs = [verts[face[0]][0], verts[face[1]][0], verts[face[2]][0]]\n ys = [verts[face[0]][1], verts[face[1]][1], verts[face[2]][1]]\n # Compute area of face from triangle points.\n base = max(xs) - min(xs)\n height = max(ys) - min(ys)\n area += 0.5 * (base + height)\n return area",
"def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)",
"def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area",
"def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
"def area(self) -> npt.NDArray[np.float_]:\n return np.sum(self.faces.area)",
"def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))",
"def surface_area_of_cube(side):\n return side",
"def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
"def compute_mesh_area_smart(mesh):\n mesh_surface_area = mesh.area\n return mesh_surface_area",
"def area(poly):\n if len(poly) < 3: # not a plane - no area\n return 0\n total = [0, 0, 0]\n num = len(poly)\n for i in range(num):\n vi1 = poly[i]\n vi2 = poly[(i+1) % num]\n prod = np.cross(vi1, vi2)\n total[0] += prod[0]\n total[1] += prod[1]\n total[2] += prod[2]\n result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))\n return abs(result/2)",
"def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])",
"def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0",
"def circle_surface_area(a):\n return (a*a*math.pi)",
"def compute_area(boxes):\n area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n return area",
"def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area",
"def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)",
"def ext_surface_area(self, rho_p, d_p):\n ac = 6./(d_p*rho_p)\n return ac",
"def surface_area(self):\n return [node.surface_area for node in self]",
"def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))",
"def area(self):\n return numpy.prod(\n numpy.meshgrid(*self.binwidths, indexing='ij'), axis=0)",
"def area(symbol):\n return (symbol.bounding_box.vertices[2].x - symbol.bounding_box.vertices[0].x) * (\n symbol.bounding_box.vertices[2].y - symbol.bounding_box[0].y)"
]
| [
"0.80011004",
"0.7844058",
"0.76182777",
"0.7455883",
"0.7309027",
"0.7252126",
"0.72476107",
"0.7167929",
"0.7060911",
"0.7034661",
"0.7029887",
"0.69332796",
"0.6921517",
"0.68879753",
"0.688659",
"0.6880711",
"0.6879456",
"0.68330854",
"0.67889273",
"0.6771197",
"0.67643",
"0.6762344",
"0.67595816",
"0.6751028",
"0.67373925",
"0.67249703",
"0.66782093",
"0.6668751",
"0.6642016",
"0.6638628"
]
| 0.83014 | 0 |
Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each entry is True if those rows match. | def intersect_2d(x1, x2):
if x1.shape[1] != x2.shape[1]:
raise ValueError("Input arrays must have same #columns")
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
res = (x1[..., None] == x2.T[None, ...]).all(1)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_array(array1, array2):",
"def fits(a, b):\n return all(x & y for x, y in zip(a, b))",
"def has_match(trajs_0, trajs_1):\n for i in range(len(trajs_0)):\n for j in range(len(trajs_1)):\n R = (trajs_0[i].get_slice()[:,:2] == trajs_1[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass \n return False",
"def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out",
"def match_arrays(a, b):\n order = np.argsort(a)\n \n sorted_a = a[order]\n \n idx_sorted_a = np.searchsorted(sorted_a, b)\n \n mask = idx_sorted_a < sorted_a.shape[0]\n mask[mask] = sorted_a[idx_sorted_a[mask]] == b[mask]\n \n idx_sorted_a = idx_sorted_a[mask]\n idx_b = np.where(mask)[0]\n \n return order[idx_sorted_a], idx_b",
"def match(cat1, cat2, xs, ys, mmax):\n\n nmatch = 0\n x1s, y1s = cat1[:,0], cat1[:,1]\n ind1 = np.arange(len(cat1))\n ind2 = np.empty(len(cat2),dtype=int)\n ind2.fill(-1)\n for i2, p2 in enumerate(cat2):\n x2, y2 = p2\n ok = (x1s > x2-xs-mmax) & (x1s < x2-xs+mmax) & \\\n (y1s > y2-ys-mmax) & (y1s < y2-ys+mmax)\n if len(x1s[ok]) == 1:\n nmatch += 1\n ind2[i2] = ind1[ok][0]\n\n return (nmatch, ind2)",
"def intersection(arrays):\n # Create hash table (dict) to store numbers in for faster O(1) lookup (for \n # any individual lookup):\n # numbers = {}\n\n # Create list for intersection of the sets:\n # intersection = []\n\n # Populate hash table with numbers from the first list (keys), because any numbers \n # not in the first list will not be in the intersection of the lists, by definition.\n numbers = {item:False for item in arrays[0]}\n # Now check the other input lists in order, removing any number/item that is not in both:\n for list in arrays[1:]:\n for item in list: # NOT actually O(n**2); just O(n) for the whole input matrix.\n # Mark as True to flag any items that are in the intersection of the two lists:\n if item in numbers:\n numbers[item] = True\n # Keep only the numbers that are in the intersection of the two lists:\n numbers = {key:value for key, value in numbers.items() if value == True}\n # Mark all as False again to start a fresh comparison with the next list:\n for item in numbers:\n numbers[item] = False\n\n return [*numbers.keys()]",
"def can_broadcast(shape1, shape2) -> bool:\n return(\n reduce(\n lambda a, b: a and b,\n starmap(\n lambda a, b: (a == b or (a == 1 or b == 1)),\n zip_longest(shape1, shape2, fillvalue=1)\n )\n )\n )",
"def fun_equal_multiset(vec_1, vec_2):\n return (len(vec_1) == len(vec_2)) & \\\n (reduce(lambda x, y: x & (y[0] == y[1]),\n list(zip(sorted(list(vec_1)), sorted(list(vec_2)))), True))",
"def match_all_columns(_left, _right):\n\n # TODO: Check if this can be done more pythonically\n for _curr_column in range(0, len(_left)):\n if _left[_curr_column] != _right[_curr_column]:\n return False\n\n return True",
"def grid_equal(grid1, grid2):\r\n for i in range(len(grid1)):\r\n for j in range(len(grid1[i])):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True",
"def mapsMatch(m1,m2):\n same = True\n f1 = file(m1,'r').readlines()\n f2 = file(m2,'r').readlines()\n for i, row in enumerate(f1):\n row = row.strip().split()\n row2 = f2[i].strip().split()\n if row[0] <> row2[0]:\n\t same = False\n break\n return same",
"def _compare_matrix(mat1, mat2):\n\n length = len(mat1)\n if len(mat2) != length:\n return False\n\n for i in range(length):\n if _compare_vector(mat1[i], mat2[i]) == False:\n return False\n\n return True",
"def match_rows(X, Y):\n n, d = X.shape\n n_, d_ = Y.shape\n assert n == n_ and d == d_\n\n # Create a weight matrix to compare the two\n W = zeros((n, n))\n for i, j in it.product(xrange(n), xrange(n)):\n # Cost of 'assigning' j to i.\n W[i, j] = norm(X[j] - Y[i])\n\n matching = Munkres().compute( W )\n matching.sort()\n _, rowp = zip(*matching)\n rowp = array( rowp )\n # Permute the rows of B according to Bi\n X_ = X[ rowp ]\n\n return X_",
"def intersect_coords(coords1, coords2):\r\n # Find the longer one\r\n if coords1.shape[-1] > coords2.shape[-1]:\r\n coords_long = coords1\r\n coords_short = coords2\r\n else:\r\n coords_long = coords2\r\n coords_short = coords1\r\n\r\n ans = np.array([[], [], []], dtype='int') # Initialize as a 3 row variable\r\n # Loop over the longer of the coordinate sets\r\n for i in range(coords_long.shape[-1]):\r\n # For each coordinate:\r\n this_coords = coords_long[:, i]\r\n # Find the matches in the other set of coordinates:\r\n x = np.where(coords_short[0, :] == this_coords[0])[0]\r\n y = np.where(coords_short[1, :] == this_coords[1])[0]\r\n z = np.where(coords_short[2, :] == this_coords[2])[0]\r\n\r\n # Use intersect1d, such that there can be more than one match (and the\r\n # size of idx will reflect how many such matches exist):\r\n idx = np.intersect1d(np.intersect1d(x, y), z)\r\n # Append the places where there are matches in all three dimensions:\r\n if len(idx):\r\n ans = np.hstack([ans, coords_short[:, idx]])\r\n\r\n return ans",
"def test_multi_intersect():\r\n\r\n arr1 = np.array(np.arange(1000).reshape(2,500))\r\n arr2 = np.array([[1,0.1,0.2],[0.3,0.4, 0.5]])\r\n arr3 = np.array(1)\r\n npt.assert_equal(1, utils.multi_intersect([arr1, arr2, arr3]))",
"def overlap(array1,array2,thresh=0.05e0):\r\n arrayout = array1 * array2\r\n thresh2 = np.max(np.abs(arrayout))*thresh\r\n arrayout = np.array(1.0 * (np.abs(arrayout) > thresh2),dtype=np.bool)\r\n return arrayout",
"def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:\n # Get predicate word indices from both predictions\n pred_ind1 = get_predicate_indices(tags1)\n pred_ind2 = get_predicate_indices(tags2)\n\n # Return if pred_ind1 pred_ind2 overlap\n return any(set.intersection(set(pred_ind1), set(pred_ind2)))",
"def where_in(a, b):\n return torch.nonzero((a[..., None] == b).any(-1)).squeeze()",
"def _is_disjoint(x, y):\n if x.ndim != 1 or y.ndim != 1:\n raise ValueError(\"Inputs must be 1-d ndarrays\")\n\n test_val = np.setdiff1d(x, y).size\n test_val += np.setdiff1d(y, x).size\n\n return test_val == 0",
"def pairwise_and(first_tensor: tf.Tensor, second_tensor: tf.Tensor) -> tf.Tensor:\n\n column = tf.expand_dims(first_tensor, 2)\n row = tf.expand_dims(second_tensor, 1)\n return tf.logical_and(column, row)",
"def intersect2d(A, B):\n # print 'A = ', A\n # print 'B = ', B\n a1_rows = A.view([('', A.dtype)] * A.shape[1])\n a2_rows = B.view([('', B.dtype)] * B.shape[1])\n # Now trim those bonds from BL\n C = np.intersect1d(a1_rows, a2_rows).view(A.dtype).reshape(-1, A.shape[1])\n return C",
"def grid_equal (grid1, grid2):\r\n s=0 \r\n for h in range(4):\r\n for m in range(4):\r\n if grid1[h][m]==grid2[h][m]:\r\n s+=1\r\n else:\r\n ()\r\n if s==16:\r\n return True\r\n else:\r\n return False",
"def grid_equal (grid1, grid2):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True",
"def intersect(a1, b1, a2, b2):\n return ccw(a1, b1, a2) != ccw(a1, b1, b2) and ccw(a2, b2, a1) != ccw(a2, b2, b1)",
"def bins_match (a, b):\n return np.sum ((a.bins - b.bins)**2) == 0",
"def test_equal12():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True], [False, False, False], [True, True, False]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def ismember(A,B):\n return [np.sum(a==B) for a in A]",
"def pairwise_equalities(things):\n\n return np.array([things[i] == things[j] for i in range(len(things))\n for j in range(i + 1, len(things))])",
"def CompareMatrices(mat1, mat2, tol):\n # just going to assume they are the same size...\n for i in range(len(mat1)):\n for j in range(len(mat1)):\n if abs(mat1[i][j] - mat2[i][j]) > tol:\n return False\n return True"
]
| [
"0.65438855",
"0.6537962",
"0.64174044",
"0.62901217",
"0.61515814",
"0.6138679",
"0.5988489",
"0.5976403",
"0.5972309",
"0.59241295",
"0.59137946",
"0.5910354",
"0.58483535",
"0.5839954",
"0.5832456",
"0.5829371",
"0.58285904",
"0.5787698",
"0.57538986",
"0.57493615",
"0.574632",
"0.5721341",
"0.57181376",
"0.5684182",
"0.5677769",
"0.5669207",
"0.5649853",
"0.5644507",
"0.56436425",
"0.56361437"
]
| 0.7020224 | 0 |
Returns the indices that sort scores descending in a smart way | def argsort_desc(scores):
return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_indices(scores: np.ndarray, shuffle_prop: float) -> np.ndarray:\n return _shuffle_subset(scores.argsort().argsort(), shuffle_prop)",
"def __get_score_ordered(scores, idx):\t\n\treturn [x[1][idx] for x in sorted(scores.items())]",
"def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]",
"def sorted_index(self) -> np.ndarray:\n return np.argsort(self.result_array.sum(axis=1))[::-1]",
"def descents(self):\n a = self.array_form\n pos = [i for i in xrange(len(a)-1) if a[i] > a[i+1]]\n return pos",
"def __get_max_indexes(num_list, number):\n result = []\n\n num_list = np.array(num_list)\n result = num_list.argsort()[-number:][::-1]\n\n return result",
"def top_indices(preds, num):\n sort_preds = np.sort(preds, 1)\n sort_preds = np.flip(sort_preds)\n sort_index = np.argsort(preds, 1)\n sort_index = np.flip(sort_index)\n\n print(f\"Top {num} results:\")\n for i in range(num):\n print(sort_index[0][i], sort_preds[0][i])\n\n return 0",
"def filter_sort_results(scores, labels, multilabel=False, max_class_count=3):\n if multilabel:\n assert len(scores) == len(labels)\n max_class_count = len(labels)\n ids_order = range(max_class_count)\n else:\n max_class_count = min(len(labels), max_class_count)\n ids_order = np.argsort(scores)[::-1][:max_class_count]\n return ids_order",
"def indexsort(d, reverse=False):\n \n return [ i for (i,j) in sorted(enumerate(d), \\\n key=operator.itemgetter(1), reverse = reverse)]",
"def best_score(scores):\n idx, score = sorted(\n enumerate(scores), key=lambda e: e[1], reverse=scores[0].higher_better\n )[0]\n return (idx + 1, score)",
"def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)",
"def sort(self):\n\n # momentarily convert into numpy, to take advantage of their easy \n # sorting.\n top_indices = np.argsort([-n for n in self.Nx])\n self.Nx = [self.Nx[i] for i in top_indices]\n self.dictionary = h.dictionary.Dictionary([\n self.dictionary.tokens[i] for i in top_indices])\n\n self.sorted = True\n\n return top_indices",
"def tiles_by_score(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)\n return sorted_list",
"def generate_order(arr, descending=True):\n sorted_indices = torch.argsort(arr, 0, descending=descending)\n return sorted_indices.reshape((len(arr), ))",
"def argsort(data, reversed=False):\n\n index = np.arange(len(data))\n key = lambda x: data[x]\n sortidx = sorted(index, key=key,reverse=reversed)\n sortidx = np.array(list(sortidx))\n return sortidx",
"def sorted_scores(scores):\n\treturn sorted(scores, key=lambda sailor: (total_score(sailor), sailor[1][0]))",
"def build_retrieved_list(self, scores):\n\n res = self.index.rank(scores)\n tmp_res = []\n # keep scores too\n tmp_scores = []\n\n # build the list\n tmp_res = []\n #print rank, \"<--\"\n for i, k in res:\n tmp_res.append( self.indices[i] )\n tmp_scores.append( k )\n\n\n # compute the difference with the difference\n diff = list(set(self.indices.values())-set(tmp_res))\n\n # shuffle to fill the rest of the list\n np.random.shuffle(diff)\n\n scores_diff = np.zeros( (len(diff,)) )\n\n final = []\n final_scores = []\n\n final.extend(tmp_res)\n final.extend(diff)\n\n final_scores.extend(tmp_scores)\n final_scores.extend(scores_diff)\n\n # remove extension for evaluation\n f = lambda x: x.split('.')[0]\n final = map(f, final)\n\n return final, final_scores",
"def find_max_score(self, scores: list) -> list:\n index = []\n max_score = max(scores)\n for i in range(len(scores)):\n if scores[i] == max_score:\n index.append(i)\n return index",
"def sort_scores(self, data: List[int]) -> List[int]:\n # Base Case\n if len(data) < 2:\n return data\n\n pivot = data[0]\n l = self.sort_scores([x for x in data[1:] if x < pivot])\n u = self.sort_scores([x for x in data[1:] if x >= pivot])\n return l + [pivot] + u",
"def _get_best_indexes(logits, n_best_size):\r\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\r\n\r\n best_indexes = []\r\n for i in range(len(index_and_score)):\r\n if i >= n_best_size:\r\n break\r\n best_indexes.append(index_and_score[i][0])\r\n return best_indexes",
"def _get_best_indexes(logits, n_best_size):\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes",
"def _get_best_indexes(logits, n_best_size):\r\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\r\n best_indexes = []\r\n for i in range(len(index_and_score)):\r\n if i >= n_best_size:\r\n break\r\n best_indexes.append(index_and_score[i][0])\r\n return best_indexes",
"def _get_winning_indices(self, index_to_best_hands):\n winning_indices = []\n winning_hand = None\n for idx, best_hand in index_to_best_hands.iteritems():\n if not winning_indices:\n winning_indices.append(idx)\n winning_hand = best_hand\n continue\n if best_hand > winning_hand:\n winning_indices = [idx]\n winning_hand = best_hand\n elif best_hand == winning_hand:\n winning_indices.append(idx)\n return winning_indices",
"def _get_best_indexes(logits, n_best_size):\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes",
"def _get_best_indexes(logits, n_best_size):\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes",
"def _get_best_indexes(logits, n_best_size):\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes",
"def nearby_sort(self, idx):\n start = max(0, idx - int(self.bin_size / 2))\n stop = min(idx + int(self.bin_size / 2), len(self.nums))\n self.nums[start: stop] = sorted(self.nums[start: stop])\n return stop",
"def get_switchy_score_order(x):\n switchy_scores = np.apply_along_axis(switchy_score, axis=0, arr=x)\n return np.argsort(switchy_scores)",
"def sort():\n return -1",
"def get_idxs_in_correct_order(idx1, idx2):\n if idx1 < idx2: return idx1, idx2\n else: return idx2, idx1"
]
| [
"0.6960895",
"0.6871682",
"0.6795757",
"0.65629816",
"0.6556796",
"0.6461051",
"0.6409348",
"0.63616157",
"0.633772",
"0.63236696",
"0.62087506",
"0.62082416",
"0.61969703",
"0.61930555",
"0.6136674",
"0.6005333",
"0.5997076",
"0.59930086",
"0.5972942",
"0.5960499",
"0.59568626",
"0.5943267",
"0.5937168",
"0.59239936",
"0.59239936",
"0.59239936",
"0.5913076",
"0.59064215",
"0.58949506",
"0.5886434"
]
| 0.75126874 | 0 |
Given a set of predicted triplets, return the list of matching GT's for each of the given predictions | def _compute_pred_matches(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh=0.5, phrdet=False):
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
# Evaluate where the union box > 0.5
gt_box_union = gt_box.reshape((2, 4))
gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
box_union = boxes.reshape((-1, 2, 4))
box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
gt_box_union = gt_box_union.astype(dtype=np.float32, copy=False)
box_union = box_union.astype(dtype=np.float32, copy=False)
            inds = bbox_overlaps(gt_box_union[None], box_union)[0] >= iou_thresh
else:
gt_box = gt_box.astype(dtype=np.float32, copy=False)
boxes = boxes.astype(dtype=np.float32, copy=False)
sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def performance_comparison_of_sets( predicted, known ):\n ### Example:\n ### predicted = Set( 2,3,4,5,10,11,12,13,14,15,20,21,22,23,24,25,26,27,28 )\n ### known = Set( 1,2,3,4,5,10,11,12,13,14,15, 21,22,23,24,25,26 )\n ### Return structure:\n ### [\n ### [ [2,3,4,5,10,11,12,13,14,15], [21,22,23,24,25,26] ], # TP correct predicted\n ### [ [20], [27,28] ], # FP over predicted\n ### [ [1] ], # FN under predicted\n ### ]\n\n # make CORRECT, UNDER and OVER predicted list\n correct = predicted.intersection( known )\n overpredicted = predicted.difference( known )\n underpredicted = known.difference( predicted )\n\n returnlists = []\n for item in ( correct, overpredicted, underpredicted ):\n if item:\n item = list(item)\n item.sort()\n tracks = [ [ item[0] ] ]\n for coord in item[1:]:\n if coord == max(tracks[-1])+1:\n tracks[-1].append(coord)\n else:\n tracks.append( [ coord ] )\n returnlists.append( tracks )\n else:\n # no overlap of this kind!\n returnlists.append( [] )\n\n # return the data structure\n return returnlists",
"def abgp_performance_measurement_of_sets(predicted,known,gffdata={}):\n # return list of gff tuples\n retgff = []\n fref = gffdata['fref']\n gname = gffdata['gname']\n\n # do the performance comparison of the sets\n (correct,over,under) = performance_comparison_of_sets(predicted,known)\n\n # make True Positives (TP) CORRECT predicted track\n for track in correct:\n gff = ( fref, GFF_PERFORMANCE_TP_FSOURCE, GFF_PERFORMANCE_TP_FMETHOD,\n min(track), max(track), '.', '+', '.', \"%s %s\" % (GFF_PERFORMANCE_TP_GCLASS, gname)\n )\n retgff.append(gff)\n\n # make False Positives (FP) OVER predicted track\n for track in over:\n gff = ( fref, GFF_PERFORMANCE_FP_FSOURCE, GFF_PERFORMANCE_FP_FMETHOD,\n min(track), max(track), '.', '+', '.', \"%s %s\" % (GFF_PERFORMANCE_FP_GCLASS, gname)\n )\n retgff.append(gff)\n\n # make False Negatives (FN) UNDER predicted track\n for track in under:\n gff = ( fref, GFF_PERFORMANCE_FN_FSOURCE, GFF_PERFORMANCE_FN_FMETHOD,\n min(track), max(track), '.', '+', '.', \"%s %s\" % (GFF_PERFORMANCE_FN_GCLASS, gname)\n )\n retgff.append(gff)\n\n # and return the gff\n return retgff",
"def most_frequent_eval(test_set, pred_tags):\n gold_tag_seqs = []\n pred_tag_seqs = []\n for sent in test_set:\n words, true_tags = zip(*sent)\n gold_tag_seqs.append(true_tags)\n\n ### YOUR CODE HERE\n DEFAULT_TAG = 'O'\n \n pred_tags_list = []\n for word in words:\n tag = DEFAULT_TAG\n if word in pred_tags:\n tag = pred_tags[word]\n pred_tags_list.append(tag)\n pred_tag_seqs.append(tuple(pred_tags_list)) \n ### END CODE HERE\n\n return evaluate_ner(gold_tag_seqs, pred_tag_seqs)",
"def get_statistics(pred, gt, num_cls=2):\n h,w = gt.shape\n statistics = []\n for i in range(num_cls):\n tp = np.sum((pred==i)&(gt==i))\n fp = np.sum((pred==i)&(gt!=i))\n fn = np.sum((pred!=i)&(gt==i)) \n statistics.append([tp, fp, fn])\n return statistics",
"def filter_gt(ground_truth, tokeep):\n gt = {}\n for k, v in ground_truth.items():\n tags = []\n for t, w in v:\n if t in tokeep:\n tags.append([t, w])\n if tags:\n gt[k] = tags\n\n return gt",
"def get_predictions(summaries, test_sets):\n predictions = []\n for test_data in test_sets:\n result = predict(summaries, test_data)\n predictions.append(result)\n\n return predictions",
"def get_matching_list(tidx=None, te_df=None, tr_df=None, skiplist=None):\n latlon_list = set()\n matching_list_ = defaultdict(int)\n tidx_list = set()\n for _, row in te_df[te_df['TRAJECTORY_IDX'] == tidx].iterrows():\n latlon_list.add((row['LATBIN'], row['LONBIN']))\n\n rebin = 1\n while len(matching_list_) == 0:\n for latbin, lonbin in latlon_list:\n cond0 = (tr_df['LATBIN']//rebin) == (latbin//rebin)\n cond1 = (tr_df['LONBIN']//rebin) == (lonbin//rebin)\n trj_arr = sorted(tr_df[cond0 & cond1]['TRAJECTORY_IDX'].unique())\n for tidx in trj_arr:\n if tidx in skiplist:\n continue\n matching_list_[tidx] += 1\n tidx_list.add(tidx)\n rebin *= 10\n matching_list = {}\n min_number_matched = 4\n while len(matching_list) == 0 and len(matching_list_) > 0 \\\n and min_number_matched > 0:\n for k in tidx_list:\n if matching_list_[k] >= min_number_matched:\n matching_list[k] = matching_list_[k]\n min_number_matched -= 1\n return matching_list, (min_number_matched+1)",
"def make_prediction(\n ingredients: List[str], top_n_suggestions: int, rules_path: str\n) -> Optional[List[Tuple[Set[str], float]]]:\n with open(rules_path, \"rb\") as f:\n rules = pickle.load(f)\n\n suggestions = [(rule[1], rule[2]) for rule in rules if rule[0] == ingredients]\n sorted_suggestions = sort_suggestions(suggestions)\n return remove_duplicates(sorted_suggestions[:top_n_suggestions])",
"def make_predictions(preds: []):\n temp = []\n res = []\n\n for i in range(0, len(preds)):\n pred = preds[i]\n next_pred = None\n\n if i < len(preds) - 1:\n next_pred = preds[i + 1]\n\n temp.append(pred)\n\n if next_pred is not None:\n # Check whether the next label is different than the current label\n if pred[PRED_LABEL_INDEX] != next_pred[PRED_LABEL_INDEX]:\n add_pred_to_list(temp, res)\n\n temp = []\n else: # Index at last item of list\n add_pred_to_list(temp, res)\n\n return res",
"def predicted_tags(classification): \n # translate classification into tag_ids and weights\n try:\n doc = [[tag_id, int(weight/classification_threshold)]\n for tag_id, weight in enumerate(classification)\n if weight > classification_threshold]\n\n # add contribution from all terms in all similar LDA topics\n tag_suggestions = defaultdict(int)\n for topic, weight in lda[doc]:\n for weight, term in lda.show_topic(topic):\n if \"class:\" not in term:\n tag_suggestions[term] += weight\n\n # turn weights into actual suggestions and take topN values\n return [tag for tag in sorted(tag_suggestions,\n key=tag_suggestions.get,\n reverse=True)\n if tag_suggestions[tag] > suggestion_threshold][:topN]\n except IndexError:\n return []",
"def __query_pairs(self):\n\n probs = self.clf.predict_proba(self.all_features)[:,1] # unlabeled_features\n\n probs_df = pd.DataFrame(probs, index=self.all_features.index.values, columns=['proba'])\n probs_df['certainty'] = abs(0.5 - probs_df.proba)\n probs_df.sort_values(by='certainty', axis=0, inplace=True)\n\n uncertain_pairs = probs_df[:self.n_uncertain]\n match_pairs = probs_df[probs_df.proba > 0.5].sample(self.n_match)\n notmatch_pairs = probs_df[probs_df.proba < 0.5].sample(self.n_notmatch)\n\n pairs_to_label = pd.concat([uncertain_pairs,\n match_pairs,\n notmatch_pairs], axis=0, ignore_index=False)\n\n return pairs_to_label.index.values",
"def get_patterns_also_in_gt(groundtruth, patterns):\n hits = [0 for p in patterns] # 1 if hit, 0 if miss\n\n # For each ground_truth pattern, check if we found it with our algorithm\n for i, p in enumerate(patterns):\n if len(p.es) == 0:\n continue\n c1 = p.vs[\"label\"]\n c1_edge = p.es[\"label\"]\n\n for gt in groundtruth:\n c2 = gt.vs[\"label\"]\n c2_edge = gt.es[\"label\"]\n\n if len(c1) != len(c2) or len(c1_edge) != len(c2_edge):\n continue\n\n if gt.isomorphic_vf2(p, color1=c1, color2=c2,\n edge_color1=c1_edge, edge_color2=c2_edge):\n if(hits[i] >= 1):\n print(\"Warning: ground-truth pattern already found\")\n else:\n hits[i] = 1\n break # consider multiple instances of same pattern?\n\n return (sum(hits), len(hits)) # hits,total",
"def compare_gene_predictors(GM_genes, Glim_genes):\n GM_starts = []\n Glim_starts = []\n GM_only = []\n Glim_only = []\n shared_starts = []\n # GM_stops = []\n # Glim_stops = []\n Glim_unique = 0\n GM_unique = 0\n\n for i in range(1,GM_genes[\"total genes\"]+1):\n GM_starts.append(GM_genes[\"gene\" + str(i)][\"start\"])\n for j in range(1,Glim_genes[\"total genes\"]+1):\n Glim_starts.append (Glim_genes[\"gene\"+ str(j)][\"start\"])\n for i in range(0,len(GM_starts)):\n if GM_starts[i] not in Glim_starts:\n print(\"start at pos. \" + str(GM_starts[i]) + \" is unique to GM genes\")\n GM_only.append(GM_starts[i])\n GM_unique += 1\n else:\n shared_starts.append(GM_starts[i])\n for j in range(0,len(Glim_starts)):\n if Glim_starts[j] not in GM_starts:\n print (\"start at pos. \" + str(Glim_starts[j]) + \" is unique to Glim genes\")\n Glim_only.append(Glim_starts[j])\n Glim_unique += 1\n else:\n if GM_starts[j] not in shared_starts:\n shared_starts.append(GM_starts[j])\n shared_starts.sort()\n print (\"Number of unique Glimmer starts = \" + str(Glim_unique))\n print (\"Number of unique GM starts = \" + str(GM_unique))\n print(\"Shared starts =\\n\")\n for k in range(0,len(shared_starts)):\n print (shared_starts[k])",
"def get_similarities(tags=None):\n tags = tags or _get_tags()\n # do your thing ...\n similar_tags = list()\n for item in itertools.permutations(tags, 2):\n diffchecker = SequenceMatcher(isjunk=None, a=item[0], b=item[1], autojunk=True)\n similarity = diffchecker.quick_ratio()\n if similarity >= SIMILAR:\n similar_tags.append(item)\n return similar_tags",
"def get_similarities(tags):\n return [(a, b)\n for (a, b) in itertools.permutations(tags, 2)\n if difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio() > SIMILAR\n and a != b\n and b.endswith('s')]",
"def eval_prediction(gold_trees, pred_trees):\n assert [t.get_tid() for t in gold_trees] == [\n t.get_tid() for t in pred_trees\n ]\n gold_vectors = [t.get_vector() for t in gold_trees]\n pred_vectors = [t.get_vector() for t in pred_trees]\n\n def score_it(gold, pred, level):\n level_gold = list(chain.from_iterable([v[level] for v in gold]))\n level_pred = list(chain.from_iterable([v[level] for v in pred]))\n assert len(level_gold) == len(level_pred), \"{} {} {}\".format(\n level, level_gold, level_pred\n )\n return evaluate(level_gold, level_pred)\n\n results = []\n for level in [\"cc\", \"ro\", \"fu\", \"at\"]:\n results.append((level, score_it(gold_vectors, pred_vectors, level)))\n\n lat = labelled_attachment(gold_trees, pred_trees)\n results.append((\"lat\", {\"accuracy\": lat}))\n return results",
"def get_pred_ids(predictions):\n le_classes = ['Emotet', 'Mirai', 'Zeus'] \n malwares_dict = {'Emotet': 1, 'Mirai': 2, 'Zeus': 3}\n predicted_ids = []\n \n for idx in predictions:\n pred_name = le_classes[idx]\n pred_id = malwares_dict[pred_name]\n predicted_ids.append(pred_id)\n \n return predicted_ids",
"def get_similarities(tags):\n similar_tags = set()\n for tag_a, tag_b in itertools.combinations(tags, 2):\n if tag_a[0] != tag_b[0]: # ~12x faster\n continue\n ratio = difflib.SequenceMatcher(None, tag_a, tag_b).ratio()\n if ratio > SIMILAR:\n similar_tags.add((tag_a, tag_b))\n\n return similar_tags",
"def getfeaturesandlabels(lst, exptype=False, semantic=True, predict=True):\n if 'PGATE' in lst[0][0]:\n print \"Get features from {} expressions.\".format('predicted' if predict else 'gold')\n else:\n print \"Get features from gold expressions. (No PGATE in token)\"\n predict = False\n \n stats = {'holders_not_in_candidates': [],\n 'position': {},\n 'expt_not_in_candidates': []}\n if not exptype:\n exptypelist = EXPTYPES\n features = {}\n labels = {}\n pos = {}\n ev = evaluate()\n for expt in EXPTYPES:\n features[expt] = []\n labels[expt] = []\n pos[expt] = []\n features[expt+'implicit'] = []\n labels[expt+'implicit'] = []\n pos[expt+'implicit'] = []\n features[expt+'w'] = []\n labels[expt+'w'] = []\n pos[expt+'w'] = []\n for sent_i, sent in enumerate(lst):\n if DEBUG: print \"---\", sent_i\n if sent_i % 1000 == 0: print \"setning\", sent_i\n daughterlists_sent(sent)\n ex = getexpressions_sent(sent)\n pex = getexpressions_sent(sent, predict=predict)\n tagholdercandidates_sent(sent, predict=predict)\n candidates = getholdercandidates_list_sent(sent)\n holder_dct = getholders_sent_new(sent)\n holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct, test=predict)\n count_gold(holder_exp_pairs) \n if True: # syntactic_path\n paths = getpaths_sent(getgraph_sent(sent))\n else:\n paths = False\n if predict:\n\n holder_exp_pairs_sys = []\n\n for c, p in enumerate(extolst(pex, gatekey='PGATE')):\n # first located e' that corresponded to e\n argmaxcxe = 0 # at least some overlap\n if args.argmaxcxe:\n argmaxcxe = int(args.argmaxcxe)\n current_pair = None\n for exp_pair_i, exp_pair in enumerate(holder_exp_pairs):\n #argmax c(x,e) regardless of exp type j&m 7.1.1\n if DEBUG:\n print exp_pair\n cxe = ev.spancoverage(exp_pair[0], p['token_id']) \n if DEBUG:\n print cxe\n if cxe > argmaxcxe:\n argmaxcxe = cxe\n current_pair = exp_pair\n if current_pair:\n holder_exp_pairs_sys.append((p['token_id'], current_pair[1], current_pair[2], current_pair[3]))\n else:\n counters['falsely_detected_exp'] += 1\n counters['falsely_detected_exp' + p['expt']] += 1\n \n if predict:\n holder_exp_pairs_use = holder_exp_pairs_sys\n else:\n holder_exp_pairs_use = holder_exp_pairs\n holder_exp_pairs_use = count_sys(holder_exp_pairs_use, save=True)\n for exp_pair in holder_exp_pairs_use:\n expt = exp_pair[2]\n cand_exists = True\n holder_set = True\n # Categorise \n if isinstance(exp_pair[1], str):\n #if predict:\n holder_set = False\n elif isinstance(exp_pair[1], set):\n # om holder ikke er hc\n #print candidates\n if expt in candidates:\n if not exp_pair[1].intersection(candidates[expt]):\n counters['holder_not_in_candidate_head'] += 1\n cand_exists = False\n for cand in candidates[expt]:\n if exp_pair[1].intersection(get_subtree(sent, cand, transitive=True)):\n cand_exists = True\n if not cand_exists:\n counters['holder_not_in_candidates'] += 1\n counters['holder_not_in_candidates' + exp_pair[2]] += 1\n stats['holders_not_in_candidates'].append({'candidates': candidates[expt],\n 'exp_pair': exp_pair})\n else:\n cand_exists = False\n counters['ignore_count'] += 1\n counters['holder not in candidates - special case'] += 1\n #if cand_exists:\n # For prediction:\n elif isinstance(exp_pair[1], OrderedDict):\n if expt in candidates:\n holdermax = argmaxcxh(exp_pair[1], candidates[expt])\n if not holdermax[0]:\n cand_exists = False\n counters['ignore_count'] += 1\n else:\n cand_exists = False\n counters['expt_not_in_candidates - new'] += 1\n stats['expt_not_in_candidates'].append({'sent': sent_i,\n 'exp_pair': 
exp_pair})\n else:\n raise Exception('exp_pair[1] of unknown type: {}'.format(exp_pair[1]))\n\n if not predict or cand_exists:\n # we don't need to count false predicted holders, the p. sum is already\n # made, but we need these for training\n \n # ext-classifiers (w/imp)\n # labels\n if exp_pair[1] == 'w':\n labels[expt + 'w'].append(True)\n labels[expt + 'implicit'].append(False)\n elif exp_pair[1] == 'implicit':\n labels[expt + 'w'].append(False)\n labels[expt + 'implicit'].append(True)\n else:\n labels[expt + 'w'].append(False)\n labels[expt + 'implicit'].append(False)\n\n # Features\n featuresdict = {}\n ex_head = getex_head(exp_pair[0], sent)\n featuresdict['ex_head_word'] = sent[ex_head-1]['form']\n featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']\n featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']\n tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)\n if tmp:\n featuresdict['dom_ex_type'] = tmp\n featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])\n featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']\n features[expt + 'w'].append(featuresdict)\n #features[expt + 'implicit'].append(featuresdict)\n pos[expt + 'w'].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_gold': exp_pair[1],\n 'holder_sys': 'w'})\n pos[expt + 'implicit'].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_gold': exp_pair[1],\n 'holder_sys': 'implicit'})\n\n if cand_exists:\n # internals\n if expt in candidates:\n featuresandlabeladded = False\n for cand in candidates[expt]:\n if args.restrict == 'sameexp' and cand in exp_pair[0]: #get_subtree(sent, cand, transitive=True)):\n pass\n else:\n featuresdict = {}\n if holder_set:\n featuresandlabeladded = True\n\n # labels\n if isinstance(exp_pair[1], OrderedDict):\n label = cand_in_ghodct(cand, exp_pair[1])\n if isinstance(exp_pair[1], set):\n label = cand in exp_pair[1]\n elif isinstance(exp_pair[1], str):\n label = cand == exp_pair[1]\n labels[expt].append(label)\n\n # positions\n pos[expt].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_sys': get_subtree(sent, cand, transitive=True),\n 'holder_gold': exp_pair[1],\n 'coref_gold': exp_pair[3],\n 'exptype' : expt\n }) \n\n # features\n ex_head = getex_head(exp_pair[0], sent)\n featuresdict['synt_path'] = syntactic_path(cand, ex_head,\n sent, paths=paths)\n if semantic:\n tmp = shallow_sem_relation(cand-1, ex_head-1, sent)\n if tmp:\n featuresdict['shal_sem_rel'] = tmp\n featuresdict['ex_head_word'] = sent[ex_head-1]['form']\n featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']\n featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']\n featuresdict['cand_head_word'] = sent[cand-1]['form']\n featuresdict['cand_head_pos'] = sent[cand-1]['pos']\n tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)\n if tmp:\n featuresdict['dom_ex_type'] = tmp\n featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])\n if cand > 1:\n featuresdict['context_r_word'] = sent[cand-2]['form']\n featuresdict['context_r_pos'] = sent[cand-2]['pos']\n if cand < len(sent):\n featuresdict['context_l_word'] = sent[cand]['form']\n featuresdict['context_l_pos'] = sent[cand]['pos']\n featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']\n \n features[expt].append(featuresdict)\n else:\n counters[\"expt_not_in_candidates\"] += 1\n counters[\"expt_not_in_candidates\" + expt] += 1\n\n stats['positions'] = pos\n return features, labels, stats",
"def compute_matches(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5, score_threshold=0.0):\n # Trim zero padding\n # TODO: cleaner to do zero unpadding upstream\n gt_boxes = trim_zeros(gt_boxes)\n gt_masks = gt_masks[..., :gt_boxes.shape[0]]\n pred_boxes = trim_zeros(pred_boxes)\n pred_scores = pred_scores[:pred_boxes.shape[0]]\n # Sort predictions by score from high to low\n indices = np.argsort(pred_scores)[::-1]\n pred_boxes = pred_boxes[indices]\n pred_class_ids = pred_class_ids[indices]\n pred_scores = pred_scores[indices]\n pred_masks = pred_masks[..., indices]\n\n # Compute IoU overlaps [pred_masks, gt_masks]\n overlaps = compute_overlaps_masks(pred_masks, gt_masks)\n\n # Loop through predictions and find matching ground truth boxes\n match_count = 0\n pred_match = -1 * np.ones([pred_boxes.shape[0]])\n gt_match = -1 * np.ones([gt_boxes.shape[0]])\n for i in range(len(pred_boxes)):\n # Find best matching ground truth box\n # 1. Sort matches by score\n sorted_ixs = np.argsort(overlaps[i])[::-1]\n # 2. Remove low scores\n low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]\n if low_score_idx.size > 0:\n sorted_ixs = sorted_ixs[:low_score_idx[0]]\n # 3. Find the match\n for j in sorted_ixs:\n # If ground truth box is already matched, go to next one\n if gt_match[j] > -1:\n continue\n # If we reach IoU smaller than the threshold, end the loop\n iou = overlaps[i, j]\n if iou < iou_threshold:\n break\n # Do we have a match?\n if pred_class_ids[i] == gt_class_ids[j]:\n match_count += 1\n gt_match[j] = i\n pred_match[i] = j\n break\n\n return gt_match, pred_match, overlaps",
"def get_targets(self,\n cls_scores_list,\n bbox_preds_list,\n gt_bboxes_list,\n gt_labels_list,\n gt_bboxes_ignore_list=None):\n assert gt_bboxes_ignore_list is None, \\\n 'Only supports for gt_bboxes_ignore setting to None.'\n num_imgs = len(cls_scores_list)\n gt_bboxes_ignore_list = [\n gt_bboxes_ignore_list for _ in range(num_imgs)\n ]\n\n (labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(\n self._get_target_single, cls_scores_list, bbox_preds_list,\n gt_labels_list, gt_bboxes_list, gt_bboxes_ignore_list)\n num_total_pos = sum((inds.numel() for inds in pos_inds_list))\n num_total_neg = sum((inds.numel() for inds in neg_inds_list))\n return (labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, num_total_pos, num_total_neg)",
"def predict_multi_target_candidates(\n sparql, timeout, gps, sources, parallel=None, exclude_source=None):\n assert len(sources) > 1 and isinstance(sources[0], (URIRef, Literal))\n if parallel is None:\n parallel = config.PREDICTION_IN_PARALLEL\n if exclude_source is None:\n exclude_source = config.PREDICTION_EXCLUDE_SOURCE\n\n pq = partial(\n predict_multi_query,\n sparql, timeout,\n sources=sources,\n )\n map_ = parallel_map if parallel else map\n results = map_(pq, gps)\n # drop timings:\n res = [stcs for _, stcs in results]\n if exclude_source:\n res = [\n OrderedDict([\n (s, tcs - {s})\n for s, tcs in stcs.items()\n ])\n for stcs in res\n ]\n return res",
"def get_word_pair_classifications(predictions, target, word_pairs, relations):\n \n wp_classifications = {}\n for relation in relations:\n wp_classifications[relation] = {\n \"false_positives\": [], \n \"true_positives\": [],\n \"false_negatives\": []\n }\n \n for pred, label, word_pair in zip(predictions, target, word_pairs):\n if pred == label:\n wp_classifications[relations[pred]][\"true_positives\"].append(word_pair)\n else:\n wp_classifications[relations[pred]][\"false_positives\"].append(word_pair)\n wp_classifications[relations[label]][\"false_negatives\"].append(word_pair)\n \n return wp_classifications",
"def triples():",
"def do_predict(self):\n answer = []\n response = []\n\n for it_predictions in json.loads(request.data.decode('UTF-8')):\n prediction = it_predictions['score']\n for ite_clf in g_list_of_classifier:\n answer.append(ite_clf.predict(prediction))\n if answer.count(True) > answer.count(False):\n response.append({'answer' : True})\n else:\n response.append({'answer' : False})\n return json.dumps(response, indent=4)",
"def get_intersection_over_union(predictions, gt):\n nsamples,nclasses,height,width = predictions.size()\n assert gt.size(0) == nsamples, \"gt size: {}, predictions size: {}\".format(gt.size(), predictions.size())\n assert gt.size(1) == height, \"gt size: {}, predictions size: {}\".format(gt.size(), predictions.size())\n assert gt.size(2) == width, \"gt size: {}, predictions size: {}\".format(gt.size(), predictions.size())\n prediction_max, prediction_argmax = predictions.max(-3)\n prediction_argmax = prediction_argmax.long()\n classes = gt.new_tensor([c for c in range(nclasses)]).view(1, nclasses, 1, 1) # [1,K,1,1]\n pred_bin = (prediction_argmax.view(nsamples, 1, height, width) == classes) # [N,K,H,W]\n gt_bin = (gt.view(nsamples, 1, height, width) == classes) # [N,K,H,W]\n intersection = (pred_bin * gt_bin).float().sum(dim=-2).sum(dim=-1) # [N,K]\n union = ((pred_bin + gt_bin) > 0).float().sum(dim=-2).sum(dim=-1) # [N,K]\n assert (intersection > union).sum() == 0\n return (intersection + 1e-8) / (union + 1e-8) # [N,K]",
"def infer_replicates(target_labels_long):\n replicate_lists = {}\n\n rep_re = []\n rep_re.append(re.compile('rep\\d+'))\n rep_re.append(re.compile('donor\\d+'))\n\n for ti in range(len(target_labels_long)):\n label = target_labels_long[ti]\n\n for ri in range(len(rep_re)):\n rep_m = rep_re[ri].search(label)\n if rep_m:\n rep_str = rep_m.group(0)\n label = label.replace(rep_str, '')\n\n replicate_lists.setdefault(label, []).append(ti)\n\n return replicate_lists",
"def evaluate_detections(ground_truth, predictions, class_name, overlap_threshold=0.5,\n allow_multiple_matches_per_ignored=True,\n verbose=True):\n\n Detection = namedtuple('Detection', ['image', 'bbox', 'score', 'gt_match'])\n GT = namedtuple('GroundTruth', ['bbox', 'is_matched', 'is_ignored'])\n detections = [Detection(image=img_pred.image_path,\n bbox=np.array(obj_pred[\"bbox\"]),\n score=obj_pred.get(\"score\", 0.0),\n gt_match=-1)\n for img_pred in predictions\n for obj_pred in img_pred\n if obj_pred[\"type\"] == class_name]\n\n scores = np.array([detection.score for detection in detections])\n sorted_ind = np.argsort(-scores)\n detections = [detections[i] for i in sorted_ind]\n\n gts = {}\n for img_gt in ground_truth:\n gts[img_gt.image_path] = GT(\n bbox=np.vstack([np.array(obj_gt[\"bbox\"]) for obj_gt in img_gt]) if img_gt else np.empty(\n (0, 4)),\n is_matched=np.zeros(len(img_gt), dtype=bool),\n is_ignored=np.array([obj_gt.get(\"is_ignored\", False) for obj_gt in img_gt], dtype=bool))\n\n detections_num = len(detections)\n true_pos = np.zeros(detections_num)\n false_pos = np.zeros(detections_num)\n\n for i, detection in tqdm(enumerate(detections), desc=\"Processing detections\",\n disable=not verbose):\n image_path = detection.image\n bboxes_gt = gts[image_path].bbox\n bbox = detection.bbox\n max_overlap = -np.inf\n\n if bboxes_gt is not None and bboxes_gt.shape[0] > 0:\n intersection_xmin = np.maximum(bboxes_gt[:, 0], bbox[0])\n intersection_ymin = np.maximum(bboxes_gt[:, 1], bbox[1])\n intersection_xmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox[0] + bbox[2])\n intersection_ymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox[1] + bbox[3])\n intersection_width = np.maximum(intersection_xmax - intersection_xmin, 0.)\n intersection_height = np.maximum(intersection_ymax - intersection_ymin, 0.)\n intersection = intersection_width * intersection_height\n\n det_area = bbox[2] * bbox[3]\n gt_area = bboxes_gt[:, 2] * bboxes_gt[:, 3]\n union = (det_area + gt_area - intersection)\n ignored_mask = gts[image_path].is_ignored\n if allow_multiple_matches_per_ignored:\n if np.any(ignored_mask):\n union[ignored_mask] = det_area\n\n overlaps = intersection / union\n # Match not ignored ground truths first.\n if np.any(~ignored_mask):\n overlaps_filtered = np.copy(overlaps)\n overlaps_filtered[ignored_mask] = 0.0\n max_overlap = np.max(overlaps_filtered)\n argmax_overlap = np.argmax(overlaps_filtered)\n # If match with non-ignored ground truth is not good enough,\n # try to match with ignored ones.\n if max_overlap < overlap_threshold and np.any(ignored_mask):\n overlaps_filtered = np.copy(overlaps)\n overlaps_filtered[~ignored_mask] = 0.0\n max_overlap = np.max(overlaps_filtered)\n argmax_overlap = np.argmax(overlaps_filtered)\n detections[i] = detection._replace(gt_match=argmax_overlap)\n\n if max_overlap >= overlap_threshold:\n if not gts[image_path].is_ignored[argmax_overlap]:\n if not gts[image_path].is_matched[argmax_overlap]:\n true_pos[i] = 1.\n gts[image_path].is_matched[argmax_overlap] = True\n else:\n false_pos[i] = 1.\n elif not allow_multiple_matches_per_ignored:\n gts[image_path].is_matched[argmax_overlap] = True\n else:\n false_pos[i] = 1.\n\n false_pos = np.cumsum(false_pos)\n true_pos = np.cumsum(true_pos)\n\n debug_visualization = False\n if debug_visualization:\n for image_path, bboxes_gt in gts.items():\n\n print(image_path)\n image = cv2.imread(image_path)\n image_gt = np.copy(image)\n for bbox in bboxes_gt.bbox:\n cv2.rectangle(image_gt, tuple(bbox[:2]), 
tuple(bbox[2:] + bbox[:2]),\n color=(255, 255, 0), thickness=2)\n cv2.imshow(\"gt\", image_gt)\n for detection in detections:\n if detection.image != image_path:\n continue\n bbox = detection.bbox\n cv2.rectangle(image, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]), color=(0, 255, 0),\n thickness=2)\n if detection.gt_match is not None:\n bbox = bboxes_gt.bbox[detection.gt_match]\n cv2.rectangle(image, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]),\n color=(0, 0, 255), thickness=1)\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n\n # Handle equal-score detections.\n # Get index of the last occurrence of a score.\n ind = len(scores) - np.unique(scores[sorted_ind[::-1]], return_index=True)[1] - 1\n ind = ind[::-1]\n # Though away redundant points.\n false_pos = false_pos[ind]\n true_pos = true_pos[ind]\n\n total_positives_num = np.sum([np.count_nonzero(~gt.is_ignored) for gt in gts.values()])\n recall = true_pos / float(total_positives_num)\n # Avoid divide by zero in case the first detection matches an ignored ground truth.\n precision = true_pos / np.maximum(true_pos + false_pos, np.finfo(np.float64).eps)\n miss_rate = 1.0 - recall\n fppi = false_pos / float(len(gts))\n\n return recall, precision, miss_rate, fppi",
"def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))",
"def get_gt_patterns_found(groundtruth, patterns):\n hits = [0 for g in groundtruth] # 1 if hit, 0 if miss (on gt)\n\n # For each ground_truth pattern, check if we found it with our algorithm\n for i, gt in enumerate(groundtruth):\n c1 = gt.vs[\"label\"]\n c1_edge = gt.es[\"label\"]\n\n for p in patterns:\n if len(p.es) == 0:\n continue\n c2 = p.vs[\"label\"]\n c2_edge = p.es[\"label\"]\n\n if len(c1) != len(c2) or len(c1_edge) != len(c2_edge):\n continue\n\n try:\n if gt.isomorphic_vf2(p, color1=c1, color2=c2,\n edge_color1=c1_edge, edge_color2=c2_edge):\n if(hits[i] >= 1):\n print(\"Warning: ground-truth pattern already found\")\n else:\n hits[i] = 1\n # print(\"hit:\",p)\n break\n except:\n print('Error')\n print(c1_edge)\n print(c2_edge)\n\n return (sum(hits), len(hits)) # hits, total"
]
| [
"0.61147356",
"0.5765611",
"0.57320887",
"0.5713905",
"0.561453",
"0.5602846",
"0.5534629",
"0.55314827",
"0.55007005",
"0.54617465",
"0.5456477",
"0.54511446",
"0.5439374",
"0.5414442",
"0.5393676",
"0.5371183",
"0.53555274",
"0.5344448",
"0.5343099",
"0.5323618",
"0.53162754",
"0.53143656",
"0.531344",
"0.53103805",
"0.5306901",
"0.5303446",
"0.52975863",
"0.52938753",
"0.5285867",
"0.5278018"
]
| 0.6012907 | 1 |
Function that calls Instance.set_deleted and catches any exception that may occur. | def delete_instance(instance, user):
try:
instance.set_deleted(timezone.now(), user)
except FormInactiveError as e:
raise ParseError(str(e)) from e | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_delete_access_attempt(self, instance, **kwargs):",
"def soft_delete(self, instance):\n self.destroy(instance)",
"def _objectDeleted(self, obj):\n pass",
"def versionable_post_delete(self, instance, timestamp):\n pass",
"def versionable_pre_delete(self, instance, timestamp):\n pass",
"async def deleted(self, value):\n pass",
"def instance_deleted(sender, instance, **kwargs):\n try:\n if instance.is_registered and not instance.deregister(save=False):\n logger.warn(\"Could not unregister {0} on delete.\".format(\n sender\n ))\n except SNSException:\n # Avoid that invalid arn token cause error when deleting instance\n pass",
"def perform_destroy(self, instance):\n pass",
"def delete(self):\n if not self.is_deleted:\n self.is_deleted = True\n self.save()",
"def delete(self, *args, **kwargs):\n self.flush_from_cache()\n self._is_deleted = True\n super().delete(*args, **kwargs)",
"def pre_service_instance_delete(self, resource_id):\n pass",
"def delete(self):\n raise exceptions.NotImplemented",
"def delete(self):\n ...",
"def test_raise_on_delete_when_frozen(self):\n self.test_model.freeze()\n self.assertRaises(errors.PersistenceError, self.test_model.delete)",
"def _cleanup_running_deleted_instances(self, context):\n action = CONF.running_deleted_instance_action\n\n if action == \"noop\":\n return\n\n # NOTE(sirp): admin contexts don't ordinarily return deleted records\n with utils.temporary_mutation(context, read_deleted=\"yes\"):\n for instance in self._running_deleted_instances(context):\n if action == \"log\":\n LOG.warning(_LW(\"Detected instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n\n elif action == 'shutdown':\n LOG.info(_LI(\"Powering off instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n try:\n try:\n # disable starting the instance\n self.driver.set_bootable(instance, False)\n except NotImplementedError:\n LOG.debug(\"set_bootable is not implemented \"\n \"for the current driver\")\n # and power it off\n self.driver.power_off(instance)\n except Exception:\n msg = _LW(\"Failed to power off instance\")\n LOG.warn(msg, instance=instance, exc_info=True)\n\n elif action == 'reap':\n LOG.info(_LI(\"Destroying instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid, use_slave=True)\n self.instance_events.clear_events_for_instance(instance)\n try:\n self._shutdown_instance(context, instance, bdms,\n notify=False)\n self._cleanup_volumes(context, instance.uuid, bdms)\n except Exception as e:\n LOG.warning(_LW(\"Periodic cleanup failed to delete \"\n \"instance: %s\"),\n e, instance=instance)\n else:\n raise Exception(_(\"Unrecognized value '%s'\"\n \" for CONF.running_deleted_\"\n \"instance_action\") % action)",
"def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)",
"def _post_delete(self, instance, **kwargs):\n pk_name = instance._meta.pk.name\n for key in self.cache_fields:\n if key in ('pk', pk_name):\n continue\n # remove pointers\n cache.delete(self._get_from_cache_key(**{key: getattr(instance, key)}))\n # remove actual object\n cache.delete(self._get_from_cache_key(**{pk_name: instance.pk}))",
"def _Delete(self):\n pass",
"def __delete__(self, instance):\n raise AttributeError(\"A Default Property cannot be deleted\")",
"def before_delete(self, obj, st):\n pass",
"def test_instance_termination_exception_sets_error(self):\n instance = self._create_fake_instance_obj()\n\n def fake_delete_instance(self, context, instance, bdms):\n raise exception.InstanceTerminationFailure(reason='')\n\n self.stub_out('nova.compute.manager.ComputeManager._delete_instance',\n fake_delete_instance)\n\n self.assertRaises(exception.InstanceTerminationFailure,\n self.compute.terminate_instance,\n self.context,\n instance, [])\n instance = db.instance_get_by_uuid(self.context, instance['uuid'])\n self.assertEqual(instance['vm_state'], vm_states.ERROR)",
"def __delete__(self, instance):\n if self.deleted: return \n if self.mode != READWRITE:\n raise AttributeError(\"This is NOT a READWRITE Property, Error\")\n if self.name is None : self.name = Property.search(instance, None ,self)\n if self.name is not None:\n try:\n del instance.__dict__[self.name]\n if hasattr(instance, \"__store__\"):\n del instance.__store__[self.name]\n self.deleted = True\n except (AttributeError,KeyError) as error: raise error\n else:\n raise AttributeError(\"Cannot find Property: %s in: %s or its ancestors\" \n % (self,instance))",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def after_delete(self, obj, st):\n pass",
"def force_delete(self):\n self.manager.force_delete(self)",
"def force_delete(self):\n self.manager.force_delete(self)",
"def delete(self):\n raise NotImplementedError()"
]
| [
"0.66897786",
"0.6640579",
"0.64718956",
"0.6322945",
"0.62817174",
"0.62663174",
"0.6152274",
"0.61403656",
"0.6046786",
"0.60244626",
"0.6016413",
"0.601435",
"0.5948192",
"0.5927294",
"0.58903",
"0.5888932",
"0.588098",
"0.587837",
"0.5854496",
"0.584458",
"0.5839829",
"0.5830313",
"0.58302736",
"0.58302736",
"0.58302736",
"0.58302736",
"0.58064276",
"0.57830876",
"0.57830876",
"0.57670903"
]
| 0.6802433 | 0 |
Returns appropriate serializer class based on context. | def get_serializer_class(self):
pk_lookup, dataid_lookup = self.lookup_fields
form_pk = self.kwargs.get(pk_lookup)
dataid = self.kwargs.get(dataid_lookup)
fmt = self.kwargs.get("format", self.request.GET.get("format"))
sort = self.request.GET.get("sort")
fields = self.request.GET.get("fields")
if fmt == Attachment.OSM:
serializer_class = OSMSerializer
elif fmt == "geojson":
serializer_class = GeoJsonSerializer
elif fmt == "xml":
serializer_class = DataInstanceXMLSerializer
elif (
form_pk is not None
and dataid is None
and form_pk != self.public_data_endpoint
):
if sort or fields:
serializer_class = JsonDataSerializer
else:
serializer_class = DataInstanceSerializer
else:
serializer_class = super().get_serializer_class()
return serializer_class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_serializer_class(self):\n return self.serializer_class",
"def get_serializer_class(self):\n assert self.serializer_class is not None, (\n \"'%s' should either include a `serializer_class` attribute, \"\n \"or override the `get_serializer_class()` method.\"\n % self.__class__.__name__\n )\n\n return self.serializer_class",
"def serializer_for(self, obj):\n # 1-NULL serializer\n if obj is None:\n return self._null_serializer_adapter\n\n obj_type = type(obj)\n serializer = None\n\n # 2-Default serializers, Dataserializable, Portable, primitives, arrays, String and some helper types(BigInteger etc)\n serializer = self.lookup_default_serializer(obj_type)\n\n # 3-Custom registered types by user\n if serializer is None:\n serializer = self.lookup_custom_serializer(obj_type)\n\n # 4 Internal serializer\n if serializer is None and self._global_serializer_adaptor is None:\n serializer = self.lookup_python_serializer(obj_type)\n\n # 5-Global serializer if registered by user\n if serializer is None:\n serializer = self.lookup_global_serializer(obj_type)\n\n if serializer is not None:\n if self._active:\n raise HazelcastSerializationError(\"There is no suitable serializer for:\" + str(obj_type))\n else:\n raise HazelcastInstanceNotActiveError()\n return serializer",
"def get_serializer_class(self):\n if self.action == 'create':\n return self.serializer_classes.get('create')\n else:\n return self.serializer_classes.get('default')",
"def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)",
"def get_serializer_class(self):\n if self.action == 'retrieve':\n return self.serializer_classes.get('retrieve')\n elif self.action == 'create':\n return self.serializer_classes.get('create')\n elif self.action == 'update':\n return self.serializer_classes.get('update')\n else:\n return self.serializer_classes.get('default')",
"def get_serializer_class(self):\n group = self.request.query_params.get('type_group')\n return self.serializer_lookup.get(group, serializers.MeasurementTypeSerializer)",
"def get_serializer_class(self):\n return self.serializer_classes.get(self.action,\n self.default_serializer_class)",
"def get_serializer_class(self, *args, **kwargs):\n if self.action == 'list':\n return self.serializer_list_class\n else:\n return self.serializer_class",
"def get_serializer_class(self):\n serializer_map = {\n \"RealEstate\": RealEstateSerializer,\n \"BankAccount\": BankAccountSerializer,\n \"Insurance\": InsuranceSerializer,\n \"Investment\": InvestmentSerializer,\n \"Company\": CompanySerializer,\n \"Residual\": ResidualSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"asset_type\", \"RealEstate\")]",
"def get_serializer_class(self):\n return self.serializers.get(self.action,\n self.serializers['default'])",
"def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.ProductDetailSerializer\n\n return self.serializer_class",
"def get_serializer_class(self):\n serializer_map = {\n \"witness\": WitnessServiceSerializer,\n \"review\": ReviewServiceSerializer,\n \"certificate_provider\": LPACertificateServiceSerializer,\n }\n\n return serializer_map[self.kwargs.get(\"service_type\", \"witness\")]",
"def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.AccountDetailSerializer\n\n return self.serializer_class",
"def get_serializer(self, *args, **kwargs):\n if self.__class__.serializer_class is not None:\n cls = self.__class__.serializer_class\n else:\n if self.action == 'list' and hasattr(self.__class__,\n 'list_serializer_class'):\n cls = self.__class__.list_serializer_class\n elif hasattr(self.__class__, 'detail_serializer_class'):\n cls = self.__class__.detail_serializer_class\n else:\n # error handling\n return super().get_serializer(*args, **kwargs)\n\n # default the context\n kwargs['context'] = self.get_serializer_context()\n\n return cls(*args, **kwargs)",
"def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.BookDetailSerializer\n elif self.action == 'upload_image':\n return serializers.BookImageSerializer\n\n return self.serializer_class",
"def get_serializer_class(self):\n\n if self.action in ['list', 'retrieve']:\n return OrderListSerializer\n else:\n return OrderSerializer",
"def get_serializer_class(self):\n if self.action == 'retrieve':\n return serializers.OperationDetailSerializer\n\n return self.serializer_class",
"def get_serializer():\n if 'serializer' in _CACHE:\n serializer = _CACHE['serializer']\n else:\n name = getattr(settings, 'DJANGO_NUMERICS_SERIALIZER_BACKEND',\n _DEFAULT_SERIALIZER)\n serializer = import_string(name)()\n _CACHE['serializer'] = serializer\n return serializer",
"def get_serializer_class(self):\n return {\"create\": ReportFileSerializer, }.get(self.action, ReportFileSerializer)",
"def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return SillaSerializer\n else:\n return SillaSerializer",
"def getSerializer():",
"def get_serializer_class(self):\n #overide function this is a fun that called to retrive the serailizer class\n #for perticular request\n #this fun are used for wanted to chang the serailzer class for the different action\n #that are available on the recip0e viewset\n if self.action == 'retrieve':\n print('okkkkkkkkkkkkw')\n return serializers.RecipeDetailSerializer\n elif self.action == 'upload_image':\n print('okkkkkkkkkkkkkkkkk')\n return serializers.RecipeImageSerailzer\n\n return self.serializer_class",
"def get_serializer_class(self):\n if self.action == 'list' or self.action == 'retrieve':\n return EventosSerializer\n else:\n return EventosRegisterSerializer",
"def get_serializer_in(self, *args, **kwargs):\n serializer_class = self.get_serializer_class_in()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)",
"def get_serializer(self, *args, **kwargs):\n kwargs['context'] = self.get_serializer_context()\n realm = kwargs['context'].get('realm', None)\n if realm is not None:\n serializer_class = ItemSerializer\n else:\n serializer_class = self.get_serializer_class()\n return serializer_class(*args, **kwargs)",
"def serializer_class(self):",
"def get_serializer_class(self):\n if (self.request.method == \"GET\" and\n self.request.query_params.get(\"nested\")):\n return serializers.ReviewNestedSerializer\n return serializers.ReviewSerializer",
"def get_serializer_class(self):\n if self.request.method == 'POST':\n return my_serializers.UploadedDataPostSerializer\n return self.serializer_class",
"def get_serializer_class(self):\n if self.action in [\"list\", \"retrieve\"]:\n return OrderSerializer\n return OrderCreateSerializer"
]
| [
"0.77186406",
"0.76763225",
"0.7583893",
"0.75552285",
"0.75115997",
"0.7477842",
"0.7349744",
"0.732208",
"0.7279612",
"0.72513735",
"0.7245787",
"0.72191036",
"0.72121954",
"0.7163859",
"0.7149745",
"0.71106553",
"0.70974696",
"0.7093688",
"0.7093164",
"0.7067511",
"0.7046832",
"0.7042383",
"0.6985342",
"0.6971087",
"0.6945434",
"0.6924911",
"0.6897177",
"0.687695",
"0.68692845",
"0.68369794"
]
| 0.7712263 | 1 |
Returns a paginated queryset. | def paginate_queryset(self, queryset):
if self.paginator is None:
return None
return self.paginator.paginate_queryset(
queryset, self.request, view=self, count=self.data_count
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(queryset, self.request, view=self)",
"def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(queryset, self.request, view=self)",
"def paginate_queryset(self, queryset, request, view=None):\n self.count = queryset.count()\n self.request = request\n try:\n self.page, self.next_page, self.has_next = queryset.fetch_page(\n self.get_page_size(request), start_cursor=Cursor(urlsafe=self.get_page_token()))\n except InvalidPage:\n raise NotFound('Requested page not found')\n except BadValueError as err:\n raise BadRequest(str(err))\n\n return list(self.page)",
"def paginate_queryset(self, queryset, request, view=None):\n # Needed for other methods of this class.\n self.request = request\n\n page_size = self.get_page_size(request)\n page_number = request.query_params.get(self.page_query_param, 1)\n\n original_page_number = page_number\n page_number = self._get_page_number(page_number)\n\n if page_number <= 0:\n msg = self.invalid_page_message.format(\n page_number=original_page_number,\n message=_(\"Invalid page\"),\n )\n raise NotFound(msg)\n\n start = (page_number - 1) * page_size\n end = page_number * page_size\n\n result = []\n total_count = 0\n total_pages = 1\n\n if queryset:\n result = queryset[start:end].execute()\n total_count = result.hits.total[\"value\"]\n hits = max(1, total_count)\n total_pages = ceil(hits / page_size)\n\n if total_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n # Needed for other methods of this class.\n self.page = PaginatorPage(\n page_number=page_number,\n total_pages=total_pages,\n count=total_count,\n )\n\n return result",
"def paginate_queryset(self, queryset, request, view=None):\n self.request = request\n\n try:\n self.page_number = int(request.query_params.get(\n self.page_query_param, 1\n ))\n except ValueError:\n self.page_number = 1\n\n if self.page_number > self.max_page:\n raise NotFound('Result page number too high.')\n\n offset = (self.page_number - 1) * self.page_size\n queryset = queryset[offset:offset + self.page_size]\n self.results = queryset.execute()\n\n self.page = self.results[:self.page_size]\n\n return self.results, self.page",
"def paginate_cursor(cls, **kwargs):\n return PaginationCursor(cls.objects, **kwargs)",
"def paginate_queryset(self, queryset, request, view=None):\n page_size = self.get_page_size(request)\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = self.page_query_param\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n\n try:\n self.page = paginator.page(page_number)\n except Exception as exc:\n msg = self.invalid_page_message.format(page_number=page_number,\n message=str(exc))\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)",
"def paginate_queryset(self, queryset, request, view=None):\n page_size = self.get_page_size(request)\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = self.page_query_param\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n\n try:\n self.page = paginator.page(page_number)\n except Exception as exc:\n msg = self.invalid_page_message.format(page_number=page_number,\n message=str(exc))\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)",
"def paginate_queryset(self, queryset, page_size):\n limit = self.kwargs.get('limit') or self.request.REQUEST.get('limit')\n try:\n limit = int(limit)\n except (TypeError, ValueError):\n limit = page_size\n limit = min(limit, getattr(settings, \"MAX_LIMIT_PER_PAGE\", 500))\n orphans = self.kwargs.get('orphans') or self.request.REQUEST.get('orphans') or getattr(settings, \"ORPHANS\", 0)\n paginator = self.get_paginator(queryset, limit, orphans=orphans, allow_empty_first_page=self.get_allow_empty())\n offset = self.kwargs.get('offset') or self.request.REQUEST.get('offset', 0)\n try:\n page = paginator.page_by_offset(offset)\n except OffsetNotAnInteger:\n # If page is not an integer, deliver first page.\n page = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n page = paginator.page(paginator.num_pages)\n return (paginator, page, page.object_list, page.has_other_pages())",
"def paginate_queryset(self, queryset, request, view=None):\n # page_size = self.get_page_size(request)\n page_size = request.GET.get(\"page_size\")\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = request.query_params.get(self.page_query_param, 1)\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n\n try:\n self.page = paginator.page(page_number)\n except InvalidPage as exc:\n msg = self.invalid_page_message.format(\n page_number=page_number, message=str(exc)\n )\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)",
"def paginate_queryset(self, queryset, request, view=None):\n self.page_number_pagination = None\n if request.GET.get('q'):\n self.page_number_pagination = CustomPageNumberPagination()\n return self.page_number_pagination.paginate_queryset(\n queryset, request, view=view\n )\n\n self.base_url = request.build_absolute_uri()\n self.ordering = self.get_ordering(request, queryset, view)\n\n self.cursor = self.decode_cursor(request)\n if self.cursor is None:\n (offset, reverse, current_position) = (0, False, None)\n else:\n (offset, reverse, current_position) = self.cursor\n\n # Cursor pagination always enforces an ordering.\n if reverse:\n queryset.add_sort(*_reverse_ordering(self.ordering))\n else:\n queryset.add_sort(*self.ordering)\n\n # If we have a cursor with a fixed position then filter by that.\n if current_position is not None:\n order = self.ordering[0]\n is_reversed = order.startswith('-')\n order_attr = order.lstrip('-')\n\n # Test for: (cursor reversed) XOR (queryset reversed)\n if self.cursor.reverse != is_reversed:\n kwargs = {order_attr: {'lt': current_position}}\n else:\n kwargs = {order_attr: {'gt': current_position}}\n\n queryset.add_pagination_filter(kwargs)\n\n # If we have an offset cursor then offset the entire page by that amount.\n # We also always fetch an extra item in order to determine if there is a\n # page following on from this one.\n queryset = queryset[offset:offset + self.page_size + 1]\n logger.info('ES query: %s', json.dumps(queryset._s.to_dict()))\n results = queryset.execute()\n\n self.page = results[:self.page_size]\n if reverse:\n self.page = list(reversed(self.page))\n\n # Determine the position of the final item following the page.\n if len(results) > len(self.page):\n has_following_position = True\n following_position = self._get_position_from_instance(\n results[-1], self.ordering\n )\n else:\n has_following_position = False\n following_position = None\n\n if reverse:\n # If we have a reverse queryset, then the query ordering was in reverse\n # so we need to reverse the items again before returning them to the user.\n\n # Determine next and previous positions for reverse cursors.\n self.has_next = (current_position is not None) or (offset > 0)\n self.has_previous = has_following_position\n if self.has_next:\n self.next_position = current_position\n if self.has_previous:\n self.previous_position = following_position\n else:\n # Determine next and previous positions for forward cursors.\n self.has_next = has_following_position\n self.has_previous = (current_position is not None) or (offset > 0)\n if self.has_next:\n self.next_position = following_position\n if self.has_previous:\n self.previous_position = current_position\n\n # Display page controls in the browsable API if there is more\n # than one page.\n if (self.has_previous or self.has_next) and self.template is not None:\n self.display_page_controls = True\n\n return results, self.page",
"def get_paginator(queryset, page_number, items=settings.PAGINATOR_SIZE):\n paginator = Paginator(queryset, items)\n try:\n paginated_query = paginator.page(page_number)\n except (EmptyPage, InvalidPage):\n paginated_query = paginator.page(paginator.num_pages)\n return paginated_query",
"def paginate(self, request, queryset, max_results):\n page = request.GET.get('page')\n paginator = Paginator(queryset, max_results)\n try:\n return paginator.page(page)\n except PageNotAnInteger:\n return paginator.page(1)\n except EmptyPage:\n return paginator.page(paginator.num_pages)",
"def paginate_view(request, query_set, page=None, num_items=None):\n if page is None:\n page = request.GET.get('page', default=1)\n if num_items is None:\n num_items = request.GET.get('num_items', default=10)\n paginator = Paginator(query_set, num_items)\n try:\n data_set = paginator.page(page)\n except EmptyPage:\n data_set = paginator.page(paginator.num_pages)\n return data_set, num_items, page",
"def paginate_queryset(self, queryset, request, view=None):\n self.count = self.get_count(queryset)\n self.start_index = 0\n self.end_index = self.start_index + self.page_size - 1\n\n # TODO: this logic is repeated below...\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n range_string = request.GET.get(self.range_query_param)\n\n if range_string:\n try:\n page_range = json.loads(range_string)\n except json.JSONDecodeError:\n return None\n\n if len(page_range) != 2:\n return None\n\n self.start_index, self.end_index = [pagination._positive_int(x) for x in page_range]\n\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n if self.start_index > self.end_index:\n self.start_index = self.end_index\n\n return list(queryset[self.start_index:self.end_index + 1])",
"def paginate(self, *args, **kwargs):\n result = {}\n result.update(self.get_paginate_data(*args, **kwargs))\n result.update(self.get_objects_data())\n return result",
"def get_list(self, **kwargs):\n self.fields = self.get_fields(**kwargs)\n fields = \", \".join(self.fields)\n kwargs[\"query\"] = 'SELECT {0}'.format(fields)\n start = kwargs.pop(\"offset\", None)\n end = kwargs.pop(\"count\", None)\n data = self.filter(**kwargs)\n\n return self.paginate(data, start=start, end=end)",
"def get_queryset(self):\n queryset = self.queryset.all()\n \n #Filter based on query\n query = self.request.query_params.get('q', None)\n if query:\n queryset = queryset.filter(Q(pk__icontains=query) | \n Q(customer__name__icontains=query))\n \n offset = int(self.request.query_params.get('offset', 0))\n limit = int(self.request.query_params.get('limit', settings.REST_FRAMEWORK['PAGINATE_BY']))\n if offset and limit:\n queryset = queryset[offset - 1:limit + (offset - 1)]\n else:\n queryset = queryset[0:50]\n\n queryset = queryset.select_related('acknowledgement',\n 'pdf',\n 'customer',\n 'employee',\n 'project')\n queryset = queryset.prefetch_related('items',\n 'customer__addresses',\n 'items__item')\n \n return queryset",
"def paginated(self) -> global___Snippet.Paginated:",
"def paginate_queryset(self, queryset, page_size):\n paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())\n page = self.kwargs.get('page') or self.request.GET.get('page') or 1\n try:\n page_number = int(page)\n except ValueError:\n if page == 'last':\n page_number = paginator.num_pages\n else:\n raise Http404(_(u\"Page is not 'last', nor can it be converted to an int.\"))\n try:\n page = paginator.page(page_number)\n except InvalidPage:\n # This used to raise a 404, but we're replacing this functionality\n #raise Http404(_(u'Invalid page (%(page_number)s)') % {\n # 'page_number': page_number\n #})\n #return last page where invalid page received\n page = paginator.page(paginator.num_pages)\n\n return (paginator, page, page.object_list, page.has_other_pages())",
"def paging(objects_list, request):\n paginator = Paginator(objects_list, 100)\n page = request.GET.get('page')\n\n return paginate(paginator, page)",
"def get_paginate_data(self, *args, **kwargs):\n pass",
"def prefetch_all(self) -> PaginationIterator[T]:\n iterator = cast(PaginationIterator[T], iter(self))\n\n if not self._has_next_page():\n return iterator\n\n # tweak per_page setting to make fetching as fast as possible\n old_per_page = self._per_page\n self._per_page = PER_PAGE_LIMIT\n\n self._page = (self._yielded_items // PER_PAGE_LIMIT) + 1\n to_skip = (self._yielded_items % PER_PAGE_LIMIT) + len(self._queue)\n\n self._fetch_next_page(skip_first=to_skip)\n\n while self._has_next_page():\n self._fetch_next_page()\n\n self._per_page = old_per_page\n\n return iterator",
"def get_page_list(self, offset=0, limit=50):\n return self._telegraph.method('getPageList', {\n 'offset': offset,\n 'limit': limit\n })",
"def paginate(self, page_num=1, page_size=100):\n from .database import Page\n count = self.count()\n pages_total = int(ceil(count / float(page_size)))\n if page_num == -1:\n page_num = pages_total\n elif page_num < 1:\n raise ValueError('Invalid page number: %d' % page_num)\n offset = (page_num - 1) * page_size\n return Page(\n objects=list(self[offset : offset + page_size]),\n number_of_objects=count,\n pages_total=pages_total,\n number=page_num,\n page_size=page_size\n )",
"def get_context_data(self, **kwargs):\n query = kwargs.pop('query', self.query)\n page = query.page\n queryset = page.object_list\n is_paginated = query.num_pages > 1\n\n context = {\n 'query': query,\n 'page_obj': page,\n 'object_list': queryset,\n 'is_paginated': is_paginated,\n }\n context_object_name = self.get_context_object_name(queryset)\n if context_object_name is not None:\n context[context_object_name] = queryset\n\n context.update(kwargs)\n return super(ListView, self).get_context_data(**context)",
"def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data",
"def pagination(cls, per_page, page):\n\n # Validate class before query\n cls.__class_validation()\n\n entities = cls.query.paginate(per_page=per_page, page=page).items\n if entities:\n return entities\n\n return None",
"def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys.prefetch_related('tags')",
"def get_queryset(self):\n return Page.objects.active()"
]
| [
"0.78808343",
"0.78808343",
"0.7629857",
"0.7568401",
"0.7416892",
"0.74030256",
"0.72709703",
"0.72709703",
"0.7251473",
"0.72404253",
"0.71125746",
"0.7033992",
"0.6993326",
"0.6931161",
"0.6896108",
"0.68769616",
"0.685527",
"0.6796368",
"0.67700887",
"0.6765375",
"0.6669542",
"0.6565592",
"0.6526117",
"0.64792705",
"0.6429908",
"0.6429651",
"0.64219916",
"0.64170116",
"0.6376876",
"0.6365555"
]
| 0.78830624 | 0 |
Get a StreamingHttpResponse response object | def _get_streaming_response(self):
def get_json_string(item):
"""Returns the ``item`` Instance instance as a JSON string."""
return json.dumps(item.json if isinstance(item, Instance) else item)
if self.kwargs.get("format") == "xml":
response = StreamingHttpResponse(
renderers.InstanceXMLRenderer().stream_data(
self.object_list, self.get_serializer
),
content_type="application/xml",
)
else:
response = StreamingHttpResponse(
json_stream(self.object_list, get_json_string),
content_type="application/json",
)
# calculate etag value and add it to response headers
if hasattr(self, "etag_hash"):
self.set_etag_header(None, self.etag_hash)
# set headers on streaming response
for k, v in self.headers.items():
response[k] = v
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stream(self):\n return ResponseStream(self)",
"def response_as_stream(self) -> Any:\n raise NotImplementedError # pragma: no cover",
"def response_handling(self) -> global___Snippet.StreamingResponseHandling:",
"def response_handling(self) -> global___Snippet.StreamingResponseHandling:",
"def __call__(self, get_response, request):\n response = get_response(request)\n\n if response.streaming:\n response.streaming_content = self.wrap_streaming_content(response.streaming_content)\n\n return response",
"def _get_stream(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> Any:\n response = _get(session, url_tail, params, stream=True)\n response.raw.decode_content = True\n return response.raw",
"def from_sync_httpx_response(cls, httpx_response, target, **kwargs):\n return httpcore.Response(\n status=httpx_response.status_code,\n headers=httpx_response.headers.raw,\n content=httpx_response.stream,\n extensions=httpx_response.extensions,\n )",
"def make_response(self, request, response, **response_kwargs):\n while iscoroutine(response):\n response = yield from response\n\n if isinstance(response, StreamResponse):\n return response\n\n response_kwargs.setdefault('content_type', 'application/json')\n\n return Response(text=dumps(response), **response_kwargs)",
"def getOutputStream(self):\r\n self._setHeaders()\r\n return self._response.getOutputStream()",
"def streaming_buffer(self) -> 'outputs.StreamingbufferResponse':\n return pulumi.get(self, \"streaming_buffer\")",
"def return_stream(self, entity, request, environ, start_response,\n response_headers, method):\n coll = entity.entity_set.open()\n try:\n if method == \"GET\":\n sinfo, sgen = coll.read_stream_close(entity.key())\n else:\n sinfo = coll.read_stream(entity.key())\n sgen = []\n coll.close()\n except Exception:\n coll.close()\n raise\n types = [sinfo.type] + self.StreamTypes\n response_type = self.content_negotiation(request, environ, types)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'media stream type refused, try application/octet-stream', 406)\n response_headers.append((\"Content-Type\", str(response_type)))\n if sinfo.size is not None:\n response_headers.append((\"Content-Length\", str(sinfo.size)))\n if sinfo.modified is not None:\n response_headers.append((\"Last-Modified\",\n str(params.FullDate(src=sinfo.modified))))\n if sinfo.md5 is not None:\n response_headers.append(\n (\"Content-MD5\", force_ascii(base64.b64encode(sinfo.md5))))\n self.set_etag(entity, response_headers)\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return sgen",
"def mock_result_stream():\n response_file_path = os.path.join(import_module(__name__).__path__[0], \n \"splunk_job_result_stream_sample.xml\")\n with open(response_file_path, 'r') as response_file:\n response = StringIO(response_file.read())\n alsoProvides(response, ISplunkResultsStream)\n return response",
"async def test_compress_async_streaming_response(self):\n\n async def get_stream_response(request):\n async def iterator():\n for chunk in self.sequence:\n yield chunk\n\n resp = StreamingHttpResponse(iterator())\n resp[\"Content-Type\"] = \"text/html; charset=UTF-8\"\n return resp\n\n r = await GZipMiddleware(get_stream_response)(self.req)\n self.assertEqual(\n self.decompress(b\"\".join([chunk async for chunk in r])),\n b\"\".join(self.sequence),\n )\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertFalse(r.has_header(\"Content-Length\"))",
"def _send_request(self, http_request, **kwargs):\n # type: (HttpRequest, Any) -> HttpResponse\n http_request.url = self._client.format_url(http_request.url)\n stream = kwargs.pop(\"stream\", True)\n pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)\n return pipeline_response.http_response",
"def test_compress_streaming_response(self):\n\n def get_stream_response(request):\n resp = StreamingHttpResponse(self.sequence)\n resp[\"Content-Type\"] = \"text/html; charset=UTF-8\"\n return resp\n\n r = GZipMiddleware(get_stream_response)(self.req)\n self.assertEqual(self.decompress(b\"\".join(r)), b\"\".join(self.sequence))\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertFalse(r.has_header(\"Content-Length\"))",
"def stream(self):\r\n return streams.Stream(self)",
"def stream(self):\r\n return streams.Stream(self)",
"def server_streaming(self) -> global___Snippet.ServerStreaming:",
"def test_random_bytes_streaming_response(self):\n\n def get_stream_response(request):\n resp = StreamingHttpResponse(self.sequence)\n resp[\"Content-Type\"] = \"text/html; charset=UTF-8\"\n return resp\n\n with mock.patch(\n \"django.utils.text.secrets.randbelow\", autospec=True, return_value=3\n ):\n r = GZipMiddleware(get_stream_response)(self.req)\n content = b\"\".join(r)\n # The fourth byte of a gzip stream contains flags.\n self.assertEqual(content[3], gzip.FNAME)\n # A 3 byte filename \"aaa\" and a null byte are added.\n self.assertEqual(content[10:14], b\"aaa\\x00\")\n self.assertEqual(self.decompress(content), b\"\".join(self.sequence))",
"def stream(self):\n return streams.Stream(self)",
"def client_streaming(self) -> global___Snippet.ClientStreaming:",
"async def _extract_response_content(\n self, response: Response\n ) -> Tuple[Response, List[Dict[str, Any]]]:\n body = b\"\"\n new_response = response\n if isinstance(response, StreamingResponse):\n async for chunk in response.body_iterator:\n body += chunk\n new_response = StreamingResponse(\n content=(chunk for chunk in [body]),\n status_code=response.status_code,\n headers={k: v for k, v in response.headers.items()},\n media_type=response.media_type,\n background=response.background,\n )\n else:\n body = response.body\n return new_response, json.loads(body)",
"async def _handle_request(\n self, request: web.Request, **kwargs: Any\n ) -> web.Response | web.StreamResponse:\n url = self._create_url(**kwargs)\n if not url:\n return web.Response(status=HTTP_NOT_FOUND)\n\n data = await request.read()\n source_header = _init_header(request)\n\n async with self._websession.request(\n request.method,\n url,\n headers=source_header,\n params=request.query,\n allow_redirects=False,\n data=data,\n ) as result:\n headers = _response_header(result)\n\n # Stream response\n response = web.StreamResponse(status=result.status, headers=headers)\n response.content_type = result.content_type\n\n try:\n await response.prepare(request)\n async for data in result.content.iter_chunked(4096):\n await response.write(data)\n\n except (aiohttp.ClientError, aiohttp.ClientPayloadError) as err:\n _LOGGER.debug(\"Stream error for %s: %s\", request.rel_url, err)\n\n return response",
"def streaming_request(self) -> global___Snippet.SimpleRequestInitialization:",
"def get(self, id_stream):\n\n session = current_app.session\n\n stream = session.query(StreamDao).filter(StreamDao.id == id_stream).first()\n\n if stream is None:\n return None, 204\n\n return stream, 200",
"def _get(session, url_tail, params, stream=False, fail_fast=False):\n # type: (Session, str, Optional[Dict[str, Any]], bool, bool) -> Response\n url = session.get_base_url2() + url_tail\n requests_session = _requests_session_fail_fast if fail_fast else _requests_session\n response = requests_session.get(\n url,\n headers=_get_headers(session),\n params=params,\n stream=stream,\n verify=session.verify_ssl_certs,\n )\n _check_response_status(response)\n return response",
"def stream():\n return flask.Response(event_stream(flask.request.access_route[0]),\n mimetype='text/event-stream')",
"def get_api_stream(url, params=None, headers=None):\n\n logging.debug(\"-> get_api_stream()\")\n logging.debug(\"Request url: %s\" % url)\n\n result = requests.get(url, params=params, headers=headers)\n\n logging.debug(\"Response content: %s\" % result.content)\n logging.debug(\"<- get_api_stream()\")\n\n return result",
"def simulate_get_stream(self, path='/', **kwargs):\n\n kwargs['_stream_result'] = True\n\n return _AsyncContextManager(self.simulate_request('GET', path, **kwargs))",
"def getresponse(self):\n self.resp.status = self.resp.status_code\n old_getheader = self.resp.raw.getheader\n\n def _decode_header(string):\n if string is None:\n return string\n return string.encode('iso-8859-1').decode('utf-8')\n\n def _encode_header(string):\n if string is None:\n return string\n return string.encode('utf-8').decode('iso-8859-1')\n\n def getheaders():\n return [(_decode_header(k), _decode_header(v))\n for k, v in self.resp.headers.items()]\n\n def getheader(k, v=None):\n return _decode_header(old_getheader(\n _encode_header(k.lower()), _encode_header(v)))\n\n def releasing_read(*args, **kwargs):\n chunk = self.resp.raw.read(*args, **kwargs)\n if not chunk:\n # NOTE(sigmavirus24): Release the connection back to the\n # urllib3's connection pool. This will reduce the number of\n # log messages seen in bug #1341777. This does not actually\n # close a socket. It will also prevent people from being\n # misled as to the cause of a bug as in bug #1424732.\n self.resp.close()\n return chunk\n\n self.resp.getheaders = getheaders\n self.resp.getheader = getheader\n self.resp.read = releasing_read\n\n return self.resp"
]
| [
"0.767063",
"0.7416034",
"0.7012967",
"0.7012967",
"0.6969851",
"0.68742424",
"0.6605871",
"0.6520931",
"0.6400283",
"0.6327227",
"0.6286114",
"0.62844795",
"0.618682",
"0.61518157",
"0.6086295",
"0.6071025",
"0.6071025",
"0.60105324",
"0.59928113",
"0.59779584",
"0.591866",
"0.58877796",
"0.5883414",
"0.58696526",
"0.58650327",
"0.5852888",
"0.5846174",
"0.5843904",
"0.5813607",
"0.57938147"
]
| 0.79366857 | 0 |
Returns the ``item`` Instance instance as a JSON string. | def get_json_string(item):
return json.dumps(item.json if isinstance(item, Instance) else item) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _serialize_item(self, item_id: str, item: Pipeline) -> dict:\n return item.to_json()",
"def __repr__(self):\n return f\"Item=(id={self.id},item_name={self.item_name},item_slug={self.item_slug})\"",
"def __repr__(self):\n return '<Item {0} : {1}>'.format(self.item_id, self.item_name)",
"def item_json(item_id):\n try:\n item = session.query(Item).filter_by(id=item_id).one()\n return jsonify(item=item.serialize)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)",
"def to_json(self):\n return {\n \"item_name\": self.item_name,\n \"summary\": self.summary,\n \"content\": self.content,\n \"date_published\": self.date_published,\n \"item_slug\": self.item_slug,\n \"category_name\": self.category_name,\n }",
"def item_json(item_id):\n item_details_json = {}\n try:\n item_in_db = session.query(Item).filter_by(id=item_id).one()\n item_details_json['item'] = item_in_db.serialize\n except Exception as e:\n item_details_json['result'] = 'No data for item ID ' \\\n + str(item_id) + ': ' + str(e)\n return jsonify(item_details_json)",
"def __repr__(self):\n return json.dumps(self.__dict__)",
"def __str__(self):\n return str(self.item)",
"def __repr__(self):\n return \"Item('\"+ self.get_id() + \"')\"",
"def to_json(self) -> str:\n if self.actual_instance is None:\n return \"null\"\n\n to_json = getattr(self.actual_instance, \"to_json\", None)\n if callable(to_json):\n return self.actual_instance.to_json()\n else:\n return json.dumps(self.actual_instance)",
"def serialize(self):\n return {\n 'item_id': self.item_id,\n 'list_id': self.list_id,\n 'name': self.name,\n 'date_time': dump_datetime(self.date),\n 'amount': self.amount,\n 'bought': self.bought,\n }",
"def itemJSON(category_id, item_id):\n item = session.query(Item).filter_by(id=item_id).one()\n return jsonify(Item=item.serialize)",
"def serialize(self):\r\n return {\r\n 'name': self.name,\r\n 'id': self.id,\r\n 'items': [i.serialize for i in self.items]\r\n }",
"def __repr__(self):\n\n result = json.dumps(self.__dict__, ensure_ascii=False)\n logger.debug('%s: %s', self.__class__.__name__, result)\n\n return result",
"def __repr__(self):\n\n result = json.dumps(self.__dict__, ensure_ascii=False)\n logger.debug('%s: %s', self.__class__.__name__, result)\n\n return result",
"def json(self):\n return {\"name\": self.name,\n \"items\": [item.json() for item in self.item]} # self.name==storeObj.name and self.item==storeObj.item",
"def serialize(self) -> str:\n return json.dumps(self.__dict__)",
"def format(self, item):\n raise NotImplementedError()",
"def _get_item_string(\n cls,\n item: CmdTemplate,\n json: bool = False,\n ) -> str:\n return misc_utils.get_cmd_template_string(item, json)",
"def serialize(self):\n return json.dumps(self.as_dict())",
"def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'items': self.serialize_items\n }",
"def __repr__(self):\n return json.dumps(self, sort_keys=True, indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)",
"def __str__(self) -> str:\n return json.dumps(self.to_dict(), indent=2)"
]
| [
"0.7664697",
"0.7162026",
"0.7117946",
"0.6911071",
"0.6823493",
"0.6803025",
"0.6772745",
"0.6762667",
"0.6739272",
"0.6738411",
"0.6654118",
"0.66514206",
"0.65619713",
"0.6546932",
"0.6546932",
"0.65354466",
"0.6527233",
"0.6491184",
"0.6487239",
"0.64639443",
"0.6459266",
"0.6401976",
"0.63916487",
"0.63916487",
"0.63916487",
"0.63916487",
"0.63916487",
"0.63916487",
"0.63916487",
"0.63916487"
]
| 0.86737585 | 0 |
Returns a set of (week index, update type) tuples | def weeks_fetched(self, user):
successes = set()
for success in self.filter(user=user, status=m.Update.COMPLETE):
successes.add((success.week_idx, success.type))
return successes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]",
"def GetListOfWeeks(self):\n delta_days = (self.GetFridayOfLastFullWeek() - self.START_DATE).days\n delta_weeks = int(math.floor(delta_days / 7))\n weeks = [self.START_DATE + dt.timedelta(days=7 * x) \n for x in range(0, delta_weeks + 1)]\n weeks = [week.strftime('%Y-%m-%d') for week in weeks]\n self.cursor.execute(\n 'SELECT DISTINCT week FROM %s' % self.BOX_OFFICE_TABLE)\n weeks_in_table = [x[0] for x in self.cursor.fetchall()]\n weeks = list(set(weeks) - set(weeks_in_table))\n weeks.sort() \n return weeks",
"def get_dates_list(self):\n week = [_day[2] for _day in [self.monday, self.tuesday, self.wednesday,\n self.thursday, self.friday, self.saturday]]\n return week",
"def weekly():",
"def week_index(self) -> pulumi.Input[Union[str, 'Type']]:\n return pulumi.get(self, \"week_index\")",
"def get_powerups() -> tuple:\n return tuple(PowerUp.powers.keys())",
"def do_upw(self, arg):\n self.do_timesheet('update week')",
"def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass",
"def record_weeks(self, user, start, end, num=10):\n query = self.user_weeks_between(user, start, end).order_by('-plays')[:num]\n for week in query:\n date = ldates.date_of_index(week.week_idx)\n yield week, date",
"def dates_of_the_week():\n date_list = list()\n now = datetime.datetime.now()\n monday = now - datetime.timedelta(days=now.weekday(), hours=now.hour, minutes=now.minute, seconds=now.second,\n microseconds=now.microsecond)\n date_list.append(monday)\n for each in range(1, 6):\n monday = monday + datetime.timedelta(days=1)\n date_list.append(monday)\n date_list.append((monday + datetime.timedelta(days=1, hours=23, minutes=59, seconds=59)))\n return date_list",
"def get_rollover_weeks(shop):\n d = {}\n ods, r = get_rollovers(shop)\n\n for od in ods:\n week = int(od.eta.strftime('%W'))+1\n if d.has_key(week):\n d[week] += int(od.plan)\n else:\n d[week] = int(od.plan)\n\n # remove the pulled from this week\n this_week = int(datetime.datetime.today().strftime('%W'))+1 \n if d.has_key(this_week):\n d[this_week] = d[this_week] - get_pulled(shop)[1] \n\n # build the return list of (week, '00:00') tuples\n l = []\n d = sorted(d.items()) # sort dictionary by week\n for key, minutes in d:\n formatted_time = _get_display_hours(minutes)\n l.append((key,formatted_time))\n\n return l",
"def _compute_week_index(self, column_index):\n latest_week = self.end_date.isocalendar()[1]\n column_date = self.start_date + timedelta(column_index)\n week = column_date.isocalendar()[1]\n week_index = latest_week - week\n return week_index",
"def get_week_frame():\n now = datetime.now()\n\n week_start = now - timedelta(days=now.weekday(),\n hours=now.hour,\n minutes=now.minute,\n seconds=now.second)\n week_end = now + timedelta(days=6 - now.weekday(),\n hours=23 - now.hour,\n minutes=59 - now.minute,\n seconds=59 - now.second)\n\n return week_start, week_end",
"def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)",
"def read_weekly_breakdown_statuses(self):\n from itertools import repeat\n\n self.ID7_GNIP_BREAKDOWN = kpi_from_db_config.ID7_GNIP_BREAKDOWN\n self.ID7_STREAM_BREAKDOWN = kpi_from_db_config.ID7_STREAM_BREAKDOWN\n self.ID7_SEED_BREAKDOWN = kpi_from_db_config.ID7_SEED_BREAKDOWN\n self.ID7_MENTION_BREAKDOWN = kpi_from_db_config.ID7_MENTION_BREAKDOWN\n \n list_id = [self.ID7_GNIP_BREAKDOWN, \n self.ID7_STREAM_BREAKDOWN, \n self.ID7_SEED_BREAKDOWN, \n self.ID7_MENTION_BREAKDOWN]\n\n list_result = [[] for i in repeat(None,len(list_id))]\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT 6\n ''', [list_id[i]])\n rows_count = self.cursor.rowcount\n \n if (rows_count == 6): # 6 is LIMIT from the query\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count >= 1 and rows_count < 6): # Change rows_count > 0 and rows_count < Number of limit\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0] * (6 - rows_count) \n else:\n list_result[i] = [0] * 6\n\n return list_result",
"def get_updates():\n global PREVIOUS_NEWEST_STR, UPDATED, WIKIDATA_WB_API, WIKIDATA_WB_PARAMS\n r = requests.get(url=WIKIDATA_WB_API, params=WIKIDATA_WB_PARAMS)\n root = etree.fromstring(r.text)\n seen = 0\n updates = []\n oldest_str = None\n newest_str = None\n for entry in root.iterchildren('{http://www.w3.org/2005/Atom}entry'):\n # print(etree.tostring(entry))\n q = entry.find('{http://www.w3.org/2005/Atom}title').text\n updated_str = entry.find('{http://www.w3.org/2005/Atom}updated').text\n if newest_str is None or updated_str > newest_str:\n newest_str = updated_str\n if oldest_str is None or updated_str < oldest_str:\n oldest_str = updated_str\n updated = dateutil.parser.parse(updated_str)\n if not re.match(r'''Q\\d+$''', q):\n # This is not an updated entity, ignore\n pass\n elif q in UPDATED and UPDATED[q] >= updated:\n # print(\"See %s update already\" % (q))\n seen += 1\n else:\n updates.append(q)\n # print(\"Got %s (updated at %s)\" % (q, updated))\n UPDATED[q] = updated\n print(\"%s: Got %d updates (ignored %d already seen)\" % (datetime.now(), len(updates), seen))\n if oldest_str > PREVIOUS_NEWEST_STR:\n print(\"WARNING: Gap between feed dates from %s to %s\" % (PREVIOUS_NEWEST_STR, oldest_str))\n PREVIOUS_NEWEST_STR = newest_str\n return updates",
"def current_year_and_week():\n _update_week_number()\n return _cur_year, _cur_week",
"def week(self) -> Index:\n warnings.warn(\n \"`week` is deprecated in 3.5.0 and will be removed in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.week)",
"def do_upm(self, arg):\n self.do_timesheet('update week')",
"def weekly(evictiondata):\r\n evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in evictions_per_week.keys():\r\n evictions_per_week[row['week_date']] = row['filings_2020']\r\n else:\r\n evictions_per_week[row['week_date']] += row['filings_2020']\r\n return evictions_per_week",
"def record_week_totals(self, user, start, end, num=10):\n for idx, total in \\\n self.weekly_play_counts(user, start, end, num, order_by_plays=True):\n yield idx, ldates.date_of_index(idx), total",
"def week(start_day=\"monday\"):\r\n today = datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time())\r\n weekday = today.weekday()\r\n week = list(calendar.Calendar(getattr(calendar, start_day.upper())).iterweekdays())\r\n pos = week.index(weekday)\r\n week_stamp = (to_stamp(today - datetime.timedelta(days=pos - i)) for i, d in enumerate(week))\r\n return collections.OrderedDict(((calendar.day_name[a], (b, b + DAY)) for a, b in zip(week, week_stamp)))",
"def _update_set(index, n_qubits):\n indices = set()\n\n # For bit manipulation we need to count from 1 rather than 0\n index += 1\n\n while index <= n_qubits:\n indices.add(index - 1)\n # Add least significant one to index\n # E.g. 00010100 -> 00011000\n index += index & -index\n return indices",
"def _get_update_set(self, var):\n update_set = set()\n cvar = self.arch.expand_reg_expr(var)\n for wrt in self.syncinfo.wrt_set:\n if var.find(wrt) != -1:\n # not in write set: comparison in a lexical form\n update_set.add(wrt)\n elif AsmParser.is_register(wrt):\n # not in write set: comparison in a normalized form\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cvar, cwrt) != None:\n update_set.add(wrt)\n return update_set",
"def record_unique_artists_in_week(self, user, start, end, num=10):\n qs = self.user_weeks_between(user, start, end) \\\n .values('week_idx') \\\n .annotate(Count('artist')) \\\n .order_by('-artist__count')[:num]\n for r in qs:\n idx = r['week_idx']\n yield idx, ldates.date_of_index(idx), r['artist__count']",
"def weekly():\n\n response = {}\n\n # 0..6 => Sunday..Saturday\n for i in range(7):\n hours = []\n interactions = 0\n\n for j in range(25):\n try:\n wfile = open(common.stats_path + '/weekly-average/' + str(i) + '/' + str(j))\n data = wfile.read()\n\n if j == 24:\n interactions = int(data)\n else:\n hours.append(int(data))\n\n wfile.close()\n except IOError:\n if i < 24:\n hours.append(0)\n\n response[DAYS[i]] = {'hours': hours, 'interactions': interactions}\n\n return response",
"def make_weeklycount(records):\n # convert the 'date' field to a datetime.date and add theisoweek\n for record in records:\n if 'start_date_local' in record:\n record['start_date_local'] = (\n datetime.strptime(record['start_date_local'], '%Y-%m-%d').date())\n record['week'] = (record['start_date_local'].isocalendar()[0] * 100\n + record['start_date_local'].isocalendar()[1])\n # then, make a dataset filled with the unique weeks and sports,\n # but no counts yet.\n # This functions is possibly much nicer with a defaultdict\n unique_weeks = SortedSet(record['week'] for record in records)\n unique_sports = SortedSet(record['type'] for record in records)\n data = {'weeks': unique_weeks, 'counts': {}}\n for sport in unique_sports:\n data['counts'].update({sport: []})\n # finally for each sport and week count the occurence of that sport\n for sport in unique_sports:\n for week in unique_weeks:\n count = sum(1 if (record['week'] == week and\n record['type'] == sport)\n else 0 for record in records)\n data['counts'][sport].append(count)\n return data",
"def _get_current_week_entries(today, user):\n some_day = today + timedelta(days=1)\n monday_of_week = some_day - timedelta(days=(some_day.isocalendar()[2] - 1))\n sunday_of_week = monday_of_week + timedelta(days=6)\n weekevents = TimeEntry.objects.filter(\n booking_date__gte=monday_of_week, booking_date__lt=sunday_of_week, user=user\n )\n return weekevents",
"def dates(self):\n #{{{ function to return start and end times for a station\n return self.wfdates.keys()",
"def getAllWeekends():\n all_weekends = set()\n for year in getYears():\n for week in getWeeks(year):\n all_weekends.add((year, week))\n return all_weekends"
]
| [
"0.6120125",
"0.60366195",
"0.5804807",
"0.57050747",
"0.55571145",
"0.5357577",
"0.52989227",
"0.5227775",
"0.52243876",
"0.5218489",
"0.52112573",
"0.5182029",
"0.51667124",
"0.51035386",
"0.5083654",
"0.5068206",
"0.50477016",
"0.5036192",
"0.5022804",
"0.5009464",
"0.49907902",
"0.49450228",
"0.4944055",
"0.49363846",
"0.49264434",
"0.4903172",
"0.48940322",
"0.48673388",
"0.48630217",
"0.4818563"
]
| 0.61926204 | 0 |
Returns a generator of (user, count of updates in progress) | def updating_users(self):
user_counts = self.values('user').filter(status=m.Update.IN_PROGRESS).annotate(count=Count('user'))
for entry in user_counts:
user = m.User.objects.get(id=entry['user'])
yield user, entry['count'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()",
"def counter_batch_accepts_counter_mutations_test(self):\n cursor = self.prepare()\n cursor.execute(\"\"\"\n BEGIN COUNTER BATCH\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://bar.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 2 and url = 'http://baz.com'\n APPLY BATCH\n \"\"\")\n cursor.execute(\"SELECT total FROM clicks\")\n res = cursor.fetchall()\n assert res == [[1], [1], [1]], res",
"def counter_batch_accepts_counter_mutations_test(self):\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN COUNTER BATCH\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://bar.com'\n UPDATE clicks SET total = total + 1 WHERE userid = 2 and url = 'http://baz.com'\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT total FROM clicks\")\n assert [list(rows[0]), list(rows[1]), list(rows[2])] == [[1], [1], [1]], rows",
"def test_task_count_user_total(self):\r\n tasks.count_total_each_user()\r\n\r\n stats = StatBookmark.query.all()\r\n\r\n expected = {\r\n 'admin': 0,\r\n self.username: 4,\r\n self.new_username: 3,\r\n }\r\n\r\n for stat in stats:\r\n user_key = stat.attrib.split('_')\r\n username = user_key[2]\r\n self.assertTrue(username in expected)\r\n self.assertEqual(expected[username], stat.data)",
"def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })",
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def update_count(self):\n pass",
"def get_historic_users_count():\n return User.objects.all().count()",
"def Count():\n return CheckForError(lib.Generators_Get_Count())",
"def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]",
"def get_num_updates(self):\n return self._num_updates",
"def counter_wrapper_2(generator):\n yield from generator",
"def use(self):\n with self._lock:\n users = self.users\n\n for user in users:\n if user['count'] < 2:\n logging.debug('User {} - {}'.format(user['username'], user['count']))\n user['count'] += 1\n\n self.users = users\n\n return AtomicUser(user['username'], user['password'], self)\n return None",
"def get_total_additions_per_user(contributors):\n return get_total_changes_per_user(contributors, 'a')",
"def enumerate_transfers_for_update(self):\n transfer_batches = self.single_robot_transfer_batches_for_update()\n for transfer_batch in transfer_batches:\n for transfer in transfer_batch.transfers:\n yield transfer",
"def update_count(self):\n pass # Do nothing",
"def test_updates_count(self):\n user = self.create_user()\n thread = self.create_thread(sender=user)\n\n original_count = thread.message_set.count()\n\n for _ in range(0, 5):\n msg = mommy.make(Message, thread=thread, sender=user)\n\n send_message(msg.pk)\n\n # Because django caches querysets, we need to request the thread again\n refreshed_thread = Thread.objects.get(pk=msg.thread.pk)\n\n self.assertEqual(refreshed_thread.total_messages, original_count + 5)",
"def UpdateCountsHandler(self):\n\n self.response.out.write('<br/><br/>Updating counts<br/>')\n MAX_COUNT = 200\n changesets = Changeset.all().order('-created_at').fetch(MAX_COUNT)\n\n date_of_first_changeset = changesets[0].created_at.date()\n date_of_last_changeset = changesets[-1].created_at.date()\n\n # if the same day for first and last write MAX_COUNT, skip next steps\n if date_of_last_changeset == date_of_first_changeset:\n update_count(date_of_first_changeset, MAX_COUNT)\n self.response.out.write('MAX_COUNT (%d) in this date (%s)<br/>' %\n (MAX_COUNT, str(date_of_first_changeset)) )\n return\n\n date_last = changesets[0].created_at.date()\n count_last = 0\n\n one_day = timedelta(days=1)\n\n for c in changesets:\n date_current = c.created_at.date()\n if date_current == date_last:\n count_last += 1\n else:\n if date_last - date_current > one_day:\n self.response.out.write('need to iterate between dates<br/>')\n d = date_current + one_day\n # iterate between dates, set counts to 0\n while d < date_last:\n self.response.out.write(str(d) + '<br/>')\n update_count(d, 0)\n d += one_day\n self.response.out.write(str(date_last)+': '+str(count_last)+'<br/>')\n is_new_entry = update_count(date_last, count_last)\n if not is_new_entry:\n self.response.out.write('not new entry<br/>')\n if not date_last == date_of_first_changeset:\n self.response.out.write(\n 'count for %s is already in datastore' % \n str(date_last)\n )\n return\n\n\n date_last = c.created_at.date()\n count_last = 1\n if c.created_at.date() == date_of_last_changeset:\n break\n \n self.response.out.write(str(changesets[0].created_at)+'<br/>')\n self.response.out.write(str(changesets[-1].created_at)+'<br/>')",
"def counter_wrapper(generator):\n for value in generator:\n yield value",
"def get_counters():\n servers = get_servers()\n\n online_players = sum([server.players.current for server in servers])\n active_servers = sum([1 for server in servers if server.players.current > 0])\n total_servers = len(servers)\n\n return (online_players, active_servers, total_servers)",
"def sync(self):\n self.send()\n detail_count = summary_count = 0\n while self.responses:\n response = self.responses[0]\n while not response.complete:\n detail_delta, summary_delta = self.fetch()\n detail_count += detail_delta\n summary_count += summary_delta\n return detail_count, summary_count",
"def msgStats():\n r = {}\n r[\"users\"] = User.count()\n return jsonify(r)",
"def get_online_count():\n return dict(online_user=get_online_users())",
"def progress(iteritem, update=1, stderr=False, start_newline=True):\n if stderr:\n stream = sys.stderr\n else:\n stream = sys.stdout\n start_time = time.time()\n curr_iter = 0\n if start_newline:\n stream.write('\\n')\n\n max_iter = len(iteritem)\n dlen = len(str(max_iter))\n memory = 0\n for idx, item in enumerate(iteritem):\n\n elapsed = int(time.time() - start_time)\n\n curr_iter += 1\n not_update = elapsed % update\n\n if not not_update and elapsed != memory:\n memory = elapsed\n remain = (max_iter - curr_iter) * (curr_iter / elapsed)\n out = '\\r%*d/%*d | Elapsed: %d sec | Remaining: %d sec '\\\n % (dlen, curr_iter, dlen, max_iter, elapsed, remain)\n stream.write(out)\n stream.flush()\n\n yield item\n\n out = '\\r%*d/%*d | Elapsed: %d sec | Remaining: 0 sec '\\\n % (dlen, curr_iter, dlen, max_iter, elapsed)\n stream.write(out)\n stream.flush()",
"def count_revisions_by_user(self):\n return self.run_query(f\"count({self.r}/contributor[id = 5558])\")",
"def GetProgress(self):\n return self.objects_finished",
"def getProgress(self):",
"def user_state_update(\n self,\n update_task: DatabaseTask,\n session: Session,\n ipfs_metadata,\n blacklisted_cids,\n user_factory_txs,\n block_number,\n block_timestamp,\n block_hash,\n) -> Tuple[int, Set]:\n\n num_total_changes = 0\n user_ids: Set[int] = set()\n if not user_factory_txs:\n return num_total_changes, user_ids\n\n user_abi = update_task.abi_values[\"UserFactory\"][\"abi\"]\n user_contract = update_task.web3.eth.contract(\n address=contract_addresses[\"user_factory\"], abi=user_abi\n )\n challenge_bus = update_task.challenge_event_bus\n\n # This stores the state of the user object along with all the events applied to it\n # before it gets committed to the db\n # Data format is {\"user_id\": {\"user\", \"events\": []}}\n # NOTE - events are stored only for debugging purposes and not used or persisted anywhere\n user_events_lookup = {}\n\n # for each user factory transaction, loop through every tx\n # loop through all audius event types within that tx and get all event logs\n # for each event, apply changes to the user in user_events_lookup\n for tx_receipt in user_factory_txs:\n txhash = update_task.web3.toHex(tx_receipt.transactionHash)\n for event_type in user_event_types_arr:\n user_events_tx = getattr(user_contract.events, event_type)().processReceipt(\n tx_receipt\n )\n # if record does not get added, do not count towards num_total_changes\n processedEntries = 0\n for entry in user_events_tx:\n user_id = entry[\"args\"]._userId\n try:\n user_id = entry[\"args\"]._userId\n user_ids.add(user_id)\n\n # if the user id is not in the lookup object, it hasn't been initialized yet\n # first, get the user object from the db(if exists or create a new one)\n # then set the lookup object for user_id with the appropriate props\n if user_id not in user_events_lookup:\n ret_user = lookup_user_record(\n update_task,\n session,\n entry,\n block_number,\n block_timestamp,\n txhash,\n )\n user_events_lookup[user_id] = {\"user\": ret_user, \"events\": []}\n\n # Add or update the value of the user record for this block in user_events_lookup,\n # ensuring that multiple events for a single user result in only 1 row insert operation\n # (even if multiple operations are present)\n\n if event_type == user_event_types_lookup[\"update_multihash\"]:\n metadata_multihash = helpers.multihash_digest_to_cid(\n entry[\"args\"]._multihashDigest\n )\n user_record = (\n parse_user_event(\n self,\n user_contract,\n update_task,\n session,\n tx_receipt,\n block_number,\n entry,\n event_type,\n user_events_lookup[user_id][\"user\"],\n ipfs_metadata[metadata_multihash],\n block_timestamp,\n )\n if metadata_multihash not in blacklisted_cids\n else None\n )\n else:\n user_record = parse_user_event(\n self,\n user_contract,\n update_task,\n session,\n tx_receipt,\n block_number,\n entry,\n event_type,\n user_events_lookup[user_id][\"user\"],\n None,\n block_timestamp,\n )\n\n if user_record is not None:\n user_events_lookup[user_id][\"events\"].append(event_type)\n user_events_lookup[user_id][\"user\"] = user_record\n processedEntries += 1\n except Exception as e:\n logger.error(\"Error in parse user transaction\")\n event_blockhash = update_task.web3.toHex(block_hash)\n raise IndexingError(\n \"user\", block_number, event_blockhash, txhash, str(e)\n ) from e\n\n num_total_changes += processedEntries\n\n logger.info(\n f\"index.py | users.py | There are {num_total_changes} events processed.\"\n )\n\n # for each record in user_events_lookup, invalidate the old record and add the new record\n # we do this after all 
processing has completed so the user record is atomic by block, not tx\n for user_id, value_obj in user_events_lookup.items():\n logger.info(f\"index.py | users.py | Adding {value_obj['user']}\")\n if value_obj[\"events\"]:\n invalidate_old_user(session, user_id)\n challenge_bus.dispatch(ChallengeEvent.profile_update, block_number, user_id)\n session.add(value_obj[\"user\"])\n\n return num_total_changes, user_ids",
"def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))",
"def _compute_user_stats():\n user_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n for user in wmt16_users:\n _user_stats = HIT.compute_status_for_user(user)\n _name = user.username\n _avg_time = seconds_to_timedelta(_user_stats[1])\n _total_time = seconds_to_timedelta(_user_stats[2])\n _data = (_name, _user_stats[0], _avg_time, _total_time)\n \n if _data[0] > 0:\n user_stats.append(_data)\n \n # Sort by total number of completed HITs.\n user_stats.sort(key=lambda x: x[1])\n user_stats.reverse()\n \n return user_stats"
]
| [
"0.57960415",
"0.57137436",
"0.55833614",
"0.55669683",
"0.5559869",
"0.5415968",
"0.5344817",
"0.5328511",
"0.5321689",
"0.5294137",
"0.52908075",
"0.5283701",
"0.52819383",
"0.5270134",
"0.5234338",
"0.52178",
"0.5208953",
"0.52078104",
"0.52028924",
"0.5191646",
"0.51594627",
"0.5140421",
"0.51397955",
"0.51326275",
"0.512995",
"0.51176167",
"0.51044667",
"0.5085635",
"0.50805306",
"0.50685215"
]
| 0.80484724 | 0 |
Returns any update that's IN_PROGRESS for more than an hour | def stalled(self):
oneHourAgo = datetime.today() - timedelta(hours = 1)
return self.filter(status=m.Update.IN_PROGRESS, requestedAt__lte=oneHourAgo) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tasks_not_updated(request, query, state='submitted', hours_since_update=36, extra_str='(1=1)'):\n if 'status' in request.session['requestParams']:\n query['status'] = request.session['requestParams']['status']\n else:\n query['status'] = state\n if 'statenotupdated' in request.session['requestParams']:\n hours_since_update = int(request.session['requestParams']['statenotupdated'])\n\n query['statechangetime__lte'] = (\n datetime.utcnow() - timedelta(hours=hours_since_update)).strftime(settings.DATETIME_FORMAT)\n\n tasks = JediTasks.objects.filter(**query).extra(where=[extra_str]).values()\n\n return tasks",
"def property_updates_in_progress(self) -> 'outputs.PropertyUpdatesInProgressResponse':\n return pulumi.get(self, \"property_updates_in_progress\")",
"def _clean_outdated(self):\n now = _now()\n outdated = []\n for request_no, request_info in self._current_requests.items():\n if now - request_info.start_time > self._force_clean_after:\n outdated.append(request_no)\n if outdated:\n logging.error(\"There are {} requests which were started but haven't \"\n \"been finished in more than {}s.\"\n .format(len(outdated), self._force_clean_after))\n for request_no in outdated:\n del self._current_requests[request_no]\n self._last_autoclean_time = now",
"def update_status_interval(self):\n if not hasattr(self, \"_update_status_interval\"):\n self._update_status_interval = datetime.timedelta(seconds=0)\n return self._update_status_interval",
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def needs_update(self):\n now = time.time()/60\n return (self.last_update_time_in_minutes+self.timeout) < now",
"def __get_updates(self, offset=0, timeout=10):\n method = 'getUpdates'\n params = {'timeout': timeout, 'offset': offset}\n resp = requests.get(self.api_url + method, params)\n try:\n return resp.json()['result']\n except KeyError:\n print('TimeoutError')\n sys.exit(1)",
"def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n all_time_limit_updates.latest('id'))",
"def query_job_progress():\n pass",
"def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Something'} is being tracked.\")\n\n return status",
"def tags_need_updating():\n\twith postgres, postgres.cursor() as cur:\n\t\t# Yeah, I'm doing all the logic in PostgreSQL. Because why not.\n\t\tcur.execute(\"\"\"select now() - tags_updated > '1 day'\n\t\t\tor (select count(*) from mustard.tags) < 1 from mustard.status\"\"\")\n\t\treturn cur.fetchone()[0]",
"def is_active(self):\n return (datetime.now() - self.updated).days < 100",
"def check_slick_status(self):\n retval = []\n slick = SlickAsPy(self.environment.slickurl + \"/api\")\n status = slick.get_host_status(self.name)\n if status['currentWork'] is None:\n seconds_since_last_checkin = (int(time.time() * 1000) - status['lastCheckin'])\n if seconds_since_last_checkin < 300000:\n retval.append(CheckStatus(self, CheckStatus.CHECK_SLICK_CHECKIN, CheckStatus.STATUS_PASS))\n else:\n retval.append(CheckStatus(self, CheckStatus.CHECK_SLICK_CHECKIN, CheckStatus.STATUS_FAIL, \"It's been {} minutes since the last checkin.\".format(seconds_since_last_checkin / 60000)))\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_NA))\n else:\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_NA))\n seconds_since_test_started = (int(time.time() * 1000) - status['currentWork']['recorded'])\n if seconds_since_test_started < 900000:\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_PASS))\n else:\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_FAIL, \"It's been {} minutes since the current test started.\".format(seconds_since_test_started / 60000)))\n return retval",
"def _get_update_interval(self, data: T) -> timedelta:\n if data and any(\n data[\"state\"][\"flags\"][key] for key in (\"pausing\", \"cancelling\")\n ):\n return timedelta(seconds=5)\n\n return super()._get_update_interval(data)",
"def updating_users(self):\n user_counts = self.values('user').filter(status=m.Update.IN_PROGRESS).annotate(count=Count('user'))\n for entry in user_counts:\n user = m.User.objects.get(id=entry['user'])\n yield user, entry['count']",
"def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")",
"def check_missed_job_completion_notifications(self):\n logger.info(\"Checking for missed job completion notifications\")\n #ten_min_ago = int((time.time() - 600) * 1e6)\n operating = self.instances.find({\n #'mtime': {'$lt': ten_min_ago},\n 'operation' : {'$exists': True, '$ne': None}\n })\n\n for fix_doc in operating:\n service = self.axops_client.get_service(fix_doc['operation']['id'])\n if ServiceStatus.completed(service['status']):\n # Keep this consistent with expectation in process_action_result() and axops/service/service.go\n payload = {\n \"id\": service['id'],\n \"name\": service['name'],\n \"status\": service['status'],\n \"annotations\": service.get('annotations', {}),\n \"user\": service['user']\n }\n try:\n logger.info(\"Detected missed job notification: %s\", payload)\n self.process_action_result(payload)\n except Exception:\n logger.exception(\"Failed to process completion event\")",
"def available(self):\n return self.coordinator.last_update_success",
"def available(self):\n return self.coordinator.last_update_success",
"def available(self):\n return self.coordinator.last_update_success",
"def available(self):\n return self.coordinator.last_update_success",
"def available(self):\n return self.coordinator.last_update_success",
"def available(self):\n return self.coordinator.last_update_success",
"def available(self):\n return self.coordinator.last_update_success",
"def need_update(self):\n five_minutes_ago = datetime.now() - timedelta(minutes=5)\n if (\n self.fetch_status != self.FetchStatus.NONE\n and self.collected_at > five_minutes_ago\n ):\n return False\n return True",
"def check_if_ok_to_update(self):\n current_time = int(time.time())\n last_refresh = self.last_refresh\n if last_refresh is None:\n last_refresh = 0\n if current_time >= (last_refresh + self.refresh_rate):\n return True\n return False",
"def longpoll(self, last_offset=0):\n\n params = {'timeout': 25}\n\n if last_offset != 0:\n params['offset'] = last_offset + 1\n\n return self.api_request('getUpdates', params=params, timeout=50)",
"def _get_requests(self, since=None):\n if since is None:\n return self._finished_requests\n # Find the first element newer than 'since' using bisect\n left, right = 0, len(self._finished_requests)\n while left < right:\n middle = (left + right) // 2\n if since <= self._finished_requests[middle].end_time:\n right = middle\n else:\n left = middle + 1\n result = self._finished_requests[left:]\n return result",
"def undead(self):\n cutoff = datetime.utcnow() - timedelta(seconds=HEARTBEAT_FAILED)\n return self.status_in(\"active\").filter(heartbeat__lt=cutoff)",
"def available(self):\n return self._coordinator.last_update_success"
]
| [
"0.6096992",
"0.587052",
"0.5578453",
"0.5522735",
"0.5438874",
"0.5422358",
"0.53898716",
"0.53732824",
"0.53431284",
"0.531127",
"0.52927464",
"0.5271256",
"0.5264709",
"0.5213381",
"0.51641107",
"0.5139483",
"0.51327115",
"0.5117717",
"0.5117717",
"0.5117717",
"0.5117717",
"0.5117717",
"0.5117717",
"0.5117717",
"0.51133597",
"0.5112248",
"0.51017404",
"0.51003677",
"0.5083856",
"0.5083571"
]
| 0.7538198 | 0 |
Returns a generator of weeks with most unique artists scrobbled. | def record_unique_artists_in_week(self, user, start, end, num=10):
qs = self.user_weeks_between(user, start, end) \
.values('week_idx') \
.annotate(Count('artist')) \
.order_by('-artist__count')[:num]
for r in qs:
idx = r['week_idx']
yield idx, ldates.date_of_index(idx), r['artist__count'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mostFrequent(self, n=5):\r\n pass",
"def get_mostFrequent(self, n=5):\r\n pass",
"def GetListOfWeeks(self):\n delta_days = (self.GetFridayOfLastFullWeek() - self.START_DATE).days\n delta_weeks = int(math.floor(delta_days / 7))\n weeks = [self.START_DATE + dt.timedelta(days=7 * x) \n for x in range(0, delta_weeks + 1)]\n weeks = [week.strftime('%Y-%m-%d') for week in weeks]\n self.cursor.execute(\n 'SELECT DISTINCT week FROM %s' % self.BOX_OFFICE_TABLE)\n weeks_in_table = [x[0] for x in self.cursor.fetchall()]\n weeks = list(set(weeks) - set(weeks_in_table))\n weeks.sort() \n return weeks",
"def k_most_talkative(self):\n word_counts = self.get_usercounts() # {u1: 3, u2: 4, }\n word_counts_heap = [(-count, username) for username, count in word_counts.items()] # [(-4, username), (-3, username)]\n heapify(word_counts_heap) # [(-4, u2), (-3, u1)]\n counter = 0\n while word_counts_heap or counter < k:\n _, username = heappop(word_counts_heap)\n counter += 1 # 1, 2\n yield username # u2, u1",
"def get_unique_N(iterable, N):\n seen = set()\n for e in iterable:\n if e in seen:\n continue\n seen.add(e)\n yield e\n if len(seen) == N:\n print(\n \"The following set of plausible SSA seasonalities have been identified: {}\".format(\n seen\n )\n )\n return",
"def user_weekly_plays_of_artists(self, user_id, artist_id, start, end):\n query = self.filter(user=user_id, artist=artist_id).order_by('week_idx')\n if start != ldates.idx_beginning or end != ldates.idx_last_sunday:\n query = query.filter(week_idx__range=(start, end))\n\n return [(week_data.week_idx, week_data.plays) for week_data in query]",
"def get_popular_tickets_solution(tickets):\n popular_tickets = []\n for ticket in tickets:\n num_watchers = len(ticket['people']['watchers'])\n if num_watchers >= 8:\n popular_tickets.append(ticket)\n return popular_tickets",
"def generate_wf(dataset: Dataset) -> Dict[str, int]:\n wf_dict = Counter()\n\n for item in tqdm(dataset, desc=\"Calculating word frequencies\"):\n for w in item[\"review\"].split():\n wf_dict[w] += 1\n\n return wf_dict",
"def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists",
"def gen_all_holds(hand):\n ans_set = set([()])\n\n for dummy_idx in range(len(hand)):\n temp_set = set([()])\n for seq in ans_set:\n for item in hand:\n new_seq = list(seq)\n if hand.count(item) > new_seq.count(item):\n new_seq.append(item)\n new_seq = sorted(new_seq)\n temp_set.add(tuple(new_seq))\n ans_set = temp_set\n return ans_set",
"def num_of_sets(l):\r\n distinct_sweets = set(l) #let's find all distinct sweets from input list\r\n dict_of = {} #empty dict to store key:value (sweet:number of occurrences)\r\n\r\n for i in distinct_sweets:\r\n dict_of[i] = l.count(i)\r\n \r\n key_min = min(dict_of.keys(), key=(lambda k: dict_of[k]))\r\n return dict_of[key_min]",
"def monkey_count(n: int) -> list:\n\n\tmonkeys = list()\n\n\tfor n in range(1, n + 1):\n\t\tmonkeys.append(n)\n\n\treturn monkeys",
"def record_weeks(self, user, start, end, num=10):\n query = self.user_weeks_between(user, start, end).order_by('-plays')[:num]\n for week in query:\n date = ldates.date_of_index(week.week_idx)\n yield week, date",
"def get_unique_highscores(self, num=5):\n # Checking if a player should be disqualified.\n suspicion = self.get_highscores().values('player').annotate(too_many=Count('suspects')).filter(too_many__gt=1)\n emails = []\n\n for sus in suspicion:\n emails.append(sus['player'])\n\n # Purge all suspected games.\n scores = self.get_highscores().annotate(suspected_games=Count('suspects')).exclude(suspected_games__gt=0)\n winnerlist = []\n distinct = []\n\n # Distinct is not implemented in combination with annotate. so doing a poor mans version.\n for score in scores:\n if score.player not in emails:\n if score.player not in distinct:\n winnerlist.append(score)\n distinct.append(score.player)\n if len(winnerlist) == num:\n break\n enough_winners = bool(len(winnerlist) >= num)\n if not enough_winners:\n log.info(u\"There are not enough unique contestants to fill the winnerlist\")\n return winnerlist, enough_winners",
"def calculateFrequentRolls():\n\n # initialize outcomeCounts to all 0s. The index corresponds to the outcome\n # NOTE: index positions 0 and 1 are not possible\n outcomeCounts = dict()\n for count in range(DIE_SIDES*2+1):\n outcomeCounts[count] = 0\n\n rollAndTallyOutcomes(outcomeCounts)\n\n print(\"outcomeCounts:\",outcomeCounts) # For debugging\n\n highestCount = max(outcomeCounts.values())\n\n mostFrequentRolls = findOutcomes(outcomeCounts, highestCount)\n\n print(\"mostFrequentRolls:\", mostFrequentRolls,\n \"and highestCount:\",highestCount) # For debugging\n\n return mostFrequentRolls, highestCount",
"def find_top_unique(self, list_of_entries, top_n):\n\n\n if len(list_of_entries) < top_n:\n self.top_n_too_large_label = Label(self.main_frame,\n fg=\"red\",\n text=\"Max N = %s\" % len(list_of_entries))\n if type(list_of_entries[0]) is AudioEntry:\n self.top_n_too_large_label.grid(row=13, column=4)\n if type(list_of_entries[0]) is VideoEntry:\n self.top_n_too_large_label.grid(row=13, column=5)\n raise Exception(\"N is larger than the total number of words\")\n\n if self.top_n_too_large_label is not None:\n self.top_n_too_large_label.grid_remove()\n\n sorted_by_count = sorted(list_of_entries, key=self.get_count, reverse=True)\n #self.top_n_too_large_label = Label(self.main_frame, fg=\"red\", text=\"Max N = %s\" % len(list_of_entries))\n unique_entries = [[] for i in range(top_n)]\n\n curr_rank = 0\n prev_count = None\n curr_count = None\n\n for entry in sorted_by_count:\n\n if entry.word in self.general_parser.words:\n entry.in_general = True\n else:\n entry.in_general = False\n\n curr_count = entry.count\n\n if prev_count is None:\n if entry.word not in self.specific_month_words:\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = 1\n continue\n\n\n if curr_rank >= top_n:\n break\n\n\n if entry.word not in self.specific_month_words:\n # increment rank if current entry has a different count\n # (the last set of entries having this count are all filled\n # into the unique_entries[])\n if curr_count != prev_count:\n curr_rank = curr_rank + 1\n if curr_rank >= top_n:\n break\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = curr_rank + 1\n continue\n unique_entries[curr_rank].append(entry)\n entry.rank = curr_rank + 1\n\n\n\n return unique_entries[0:curr_rank + 1]",
"def get_popular_tickets(tickets):\n popular_tickets = []\n #\n # TODO - your code here\n # \n for ticket in tickets:\n str_len=len(ticket['people']['watchers'])\n if str_len>=8:\n popular_tickets.append(ticket)\n \n return popular_tickets",
"def transform(self):\n\n transactions = self._get_transactions(self._x_transformed)\n self.frequent_items = [set(item) for item in find_frequent_itemsets(\n transactions, minimum_support=self.support_min) if len(item) > 1]\n return self.frequent_items",
"def weekly():",
"def make_weeklycount(records):\n # convert the 'date' field to a datetime.date and add theisoweek\n for record in records:\n if 'start_date_local' in record:\n record['start_date_local'] = (\n datetime.strptime(record['start_date_local'], '%Y-%m-%d').date())\n record['week'] = (record['start_date_local'].isocalendar()[0] * 100\n + record['start_date_local'].isocalendar()[1])\n # then, make a dataset filled with the unique weeks and sports,\n # but no counts yet.\n # This functions is possibly much nicer with a defaultdict\n unique_weeks = SortedSet(record['week'] for record in records)\n unique_sports = SortedSet(record['type'] for record in records)\n data = {'weeks': unique_weeks, 'counts': {}}\n for sport in unique_sports:\n data['counts'].update({sport: []})\n # finally for each sport and week count the occurence of that sport\n for sport in unique_sports:\n for week in unique_weeks:\n count = sum(1 if (record['week'] == week and\n record['type'] == sport)\n else 0 for record in records)\n data['counts'][sport].append(count)\n return data",
"def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]",
"def getAllWeekends():\n all_weekends = set()\n for year in getYears():\n for week in getWeeks(year):\n all_weekends.add((year, week))\n return all_weekends",
"def make_most_abundant(seqs):\r\n seq_to_group = unique_id_map(seqs)\r\n groups = invert_dict(seq_to_group)\r\n\r\n def most_abundant(ids, seqs='ignored'):\r\n \"\"\"Returns most abundant seq from ids\"\"\"\r\n id_groups = [len(groups[seq_to_group[i]]) for i in ids]\r\n return ids[argmax(id_groups)]\r\n return most_abundant",
"def get_unique_words():\n # Unique words\n words_set = set()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_set.add(word)\n\n return words_set",
"def birthdayCakeCandles(n, ar):\n\n tallest = max(ar)\n return ar.count(tallest)",
"def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count",
"def monkey_count(n):\n return [i for i in range(1, n + 1)]",
"def test_weekly_resolution_hindcast(daily_initialized, daily_obs):\n weekly_hindcast = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_hindcast.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_hindcast, weekly_obs).all()",
"def strongest(self):\n pps = collections.Counter()\n for crd in self:\n pps += collections.Counter( {crd.suit:crd.hc} )\n return sorted(pps.items(), reverse=True, key=lambda x:x[1])",
"def get_best_five(self):\n return sorted(self.speakers.iteritems(),\n key=lambda (key, val): (val, key),\n reverse=True)[:5]"
]
| [
"0.5435604",
"0.5435604",
"0.5316265",
"0.52608997",
"0.52595896",
"0.52537256",
"0.5251781",
"0.522599",
"0.516027",
"0.50954574",
"0.5080525",
"0.50767154",
"0.5069311",
"0.5067429",
"0.5006133",
"0.49751127",
"0.4955674",
"0.49242568",
"0.49237013",
"0.49234274",
"0.4915169",
"0.48851004",
"0.48846608",
"0.48703456",
"0.48660937",
"0.48525128",
"0.4848362",
"0.48425093",
"0.48340157",
"0.482643"
]
| 0.672771 | 0 |
Returns a basic query set of a user's data filtered to plays of particular artists, between start and end. | def user_weekly_plays_of_artists(self, user_id, artist_id, start, end):
query = self.filter(user=user_id, artist=artist_id).order_by('week_idx')
if start != ldates.idx_beginning or end != ldates.idx_last_sunday:
query = query.filter(week_idx__range=(start, end))
return [(week_data.week_idx, week_data.plays) for week_data in query] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetArtists(self, start=0, end=0, sortmethod='artist', sortorder='ascending', filter=''):\n self.logger.debug(\"Fetching all artists in the music database\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties = ['thumbnail', 'fanart']\n limits = {'start': int(start), 'end': int(end)}\n filter = {'field': 'artist', 'operator': 'contains', 'value': filter}\n return xbmc.AudioLibrary.GetArtists(properties=properties, limits=limits, sort=sort, filter=filter)\n except ValueError:\n logger.error(\"Unable to fetch artists!\")\n return",
"def artist_filter(query_params, query):\n table = Artist.__table__\n col_name = table.c.followers\n if query_params.get('min_followers') is not None \\\n and query_params.get('max_followers') is not None:\n filt_statement = and_(col_name >= query_params.get('min_followers'),\n col_name <= query_params.get('max_followers'))\n query = query.filter(filt_statement)\n elif query_params.get('min_followers') is not None:\n query = query.filter(\n col_name >= int(query_params.get('min_followers')))\n elif query_params.get('max_followers') is not None:\n query = query.filter(\n col_name <= int(query_params.get('max_followers')))\n if query_params.get('num_albums') is not None:\n query = query.filter(\n table.c.num_albums == int(query_params.get('num_albums')))\n if query_params.get('num_media') is not None:\n query = query.filter(\n table.c.num_media == int(query_params.get('num_media')))\n return query",
"def get_artists():\n return query_multiple(request.args, artist_search, \\\n artist_filter, Artist, artists_schema)",
"def by_user(cls, user, start_time=None, end_time=None):\n query = cls.objects.filter(user_id=user.pk)\n if start_time:\n query = query.filter(timestamp__gte=start_time)\n if end_time:\n query = query.filter(timestamp__lt=end_time)\n return query.order_by('timestamp')",
"def get_rides(cls, **kwargs):\n\n\n q = cls.query.options(db.joinedload('user'))\n\n if kwargs.get('start_lat'):\n deg = float(kwargs.get('deg'))\n start_lat = float(kwargs.get('start_lat'))\n start_lng = float(kwargs.get('start_lng'))\n\n q = q.filter(\n ( (cls.start_lat < str(start_lat + deg)) &\n (cls.start_lat > str(start_lat - deg))\n ) &\n ( (cls.start_lng < str(start_lng + deg)) &\n (cls.start_lng > str(start_lng - deg))\n ))\n\n if kwargs.get('end_lat'):\n deg = float(kwargs.get('deg'))\n end_lat = float(kwargs.get('end_lat'))\n end_lng = float(kwargs.get('end_lng'))\n q = q.filter(\n ( (cls.end_lat < str(end_lat + deg)) &\n (cls.end_lat > str(end_lat - deg))\n ) &\n ( (cls.end_lng < str(end_lng + deg)) &\n (cls.end_lng > str(end_lng - deg))\n ))\n\n\n if kwargs.get('date_from'):\n date_from = kwargs.get('date_from')\n\n q = q.filter((cast(cls.start_timestamp, Date) >= date_from))\n\n if kwargs.get('date_to'):\n date_to = kwargs.get('date_to')\n \n q = q.filter((cast(cls.start_timestamp, Date) <= date_to))\n\n # print '\\n\\nhiii: {}\\n\\n'.format(kwargs.get('start_time'))\n \n # q = q.filter(cast(cls.start_timestamp, Time) >= start_time)\n # if kwargs.get('start_time') >= '00:00:00' kwargs.get('start_time') and < \"00:08:00\":\n # pass\n\n # print '\\n\\n\\n*STARTTIME*\\n{}, {}\\n\\n\\n'.format(start_time, type(start_time))\n\n # q = q.filter(cast(cls.start_timestamp, Time) >= start_time)\n\n if kwargs.get('cost'):\n cost = int(kwargs.get('cost'))\n q = q.filter(cls.cost < cost)\n\n start_time = kwargs.get('start_time')\n if start_time >= time(0,0) and start_time < time(8,0):\n q1 = q.filter(cast(cls.start_timestamp, Time) >= start_time)\n q2 = q.filter(cast(cls.start_timestamp, Time) < time(8, 0))\n q = q1.intersect(q2)\n else:\n q1 = q.filter(cast(cls.start_timestamp, Time) >= start_time)\n q2 = q.filter(cast(cls.start_timestamp, Time) < time(8, 0))\n q = q1.union(q2)\n\n if kwargs.get('order_by') == 'date':\n q = q.order_by(cls.start_timestamp)\n\n if kwargs.get('order_by') == 'time':\n sort_order = case(value=(cast(cls.start_timestamp, Time)), whens=order_by_time)\n q = q.order_by(sort_order)\n\n if kwargs.get('order_by') == 'cost':\n q = q.order_by(cls.cost)\n\n if kwargs.get('limit'):\n limit_number = int(kwargs.get('limit'))\n q = q.limit(limit_number)\n\n if kwargs.get('offset'):\n offset_number = int(kwargs.get('offset'))\n q = q.offset(offset_number)\n\n if kwargs.get('count'):\n rides = q.count()\n else:\n rides = q.all()\n print '\\n\\n{}\\n\\n'.format(rides)\n\n return rides",
"def get(self):\n mb = MusicbrainzClient()\n query = self.get_argument('q')\n artists, tracks = yield [mb.search_artists(query),\n mb.search_tracks(query)]\n data = {\n 'artists': [\n {\n 'id': artist['id'],\n 'artist': artist['name'],\n 'note': artist.get('disambiguation', '')\n }\n for artist in artists['artist-list']\n ],\n 'tracks': [\n {\n 'id': track['id'],\n 'title': track['title'],\n 'artist': track['artist-credit-phrase']\n }\n for track in tracks['recording-list']\n ]\n }\n self.finish(data)",
"def _query_songs_by_artist(self, entities: List[str]):\n artists = []\n for e in entities:\n artists += self.kb_api.get_songs_by_artist(e)\n\n return artists",
"async def arts(self, ctx: BBContext, artist: Optional[discord.Member] = None):\n\n if artist:\n query = f'SELECT url, artist_name FROM {TABLE_ARTS} WHERE artist_id = $1 LIMIT 20'\n args = [query, artist.id]\n else:\n query = f'SELECT url, artist_name FROM {TABLE_ARTS} ORDER BY random() LIMIT 20'\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n\n view = ArtsPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)",
"def GetAlbums(self, start=0, end=0, sortmethod='label', sortorder='ascending', filter='', artistid=None):\n self.logger.debug(\"Loading all albums for ARTISTID \" + str(artistid))\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties=['artist', 'title', 'year', 'description', 'thumbnail']\n limits = {'start': int(start), 'end': int(end)}\n if artistid is not None:\n filter = {'artistid': int(artistid)}\n else:\n filter = {'or': [\n {'field': 'album', 'operator': 'contains', 'value': filter},\n {'field': 'artist', 'operator': 'contains', 'value': filter}\n ]}\n return xbmc.AudioLibrary.GetAlbums(properties=properties, limits=limits, sort=sort, filter=filter)\n except ValueError:\n return",
"async def artists(self, ctx: BBContext):\n\n query = \"\"\"SELECT DISTINCT artist_name, COUNT(*)\n FROM extras.arts\n WHERE artist_name IS NOT NULL\n GROUP BY artist_name\n ORDER BY COUNT(*) DESC\"\"\"\n\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n view = ArtsLeaderboardPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)",
"def get_playlists_for_user(self, request): \n user = Account.find_by_id(request.userid)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)",
"def fetch_player_data(\n start_date: str = f\"{EARLIEST_SEASON_WITH_EXTENSIVE_PLAYER_STATS}-01-01\",\n end_date: str = str(date.today()),\n verbose: int = 1,\n) -> List[Dict[str, Any]]:\n if verbose == 1:\n print(\n f\"Fetching player data from between {start_date} and {end_date} \"\n \"in yearly baches...\"\n )\n\n data_batch_date_ranges = _player_batch_date_ranges(start_date, end_date)\n partial_fetch_player_stats_batch = partial(\n _fetch_player_stats_batch, verbose=verbose\n )\n\n # Google Cloud Run cannot handle such a large data set in its response, so we\n # fetch it in batches. With the implementation of kedro pipelines, we should\n # usually read historical data from files or Google Cloud Storage, so the slowness\n # of this isn't much of an issue.\n data = itertools.chain.from_iterable(\n [\n partial_fetch_player_stats_batch(*date_pair)\n for date_pair in data_batch_date_ranges\n ]\n )\n\n if verbose == 1:\n print(\"All player data received!\")\n\n return list(data)",
"def get_users(self, date_start, date_end, project='enwiki'):\n\n # @TODO MOVE DB REFS INTO QUERY MODULE\n\n params = {\n 'date_start': format_mediawiki_timestamp(date_start),\n 'date_end': format_mediawiki_timestamp(date_end),\n }\n conn = Connector(instance=settings.PROJECT_DB_MAP[project])\n query = sub_tokens(self.QUERY_TYPES[self._query_type],\n db=escape_var(project))\n conn._cur_.execute(query, params)\n\n for row in conn._cur_:\n yield row[0]",
"def get_artists_recent_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.played_at.asc()).all()\n return artists",
"def album_filter(query_params, query):\n table = Album.__table__\n col_name = table.c.release_date\n if query_params.get('start_year') is not None \\\n and query_params.get('end_year') is not None:\n filt_statement = and_(\n col_name >= date(int(query_params.get('start_year')), 1, 1),\n col_name <= date(int(query_params.get('end_year')), 12, 31))\n query = query.filter(filt_statement)\n elif query_params.get('start_year') is not None:\n query = query.filter(\n col_name >= date(int(query_params.get('start_year')), 1, 1))\n elif query_params.get('end_year') is not None:\n query = query.filter(\n col_name <= date(int(query_params.get('end_year')), 12, 31))\n if query_params.get('num_tracks') is not None:\n query = query.filter(\n table.c.num_tracks == int(query_params.get('num_tracks')))\n if query_params.get('label') is not None:\n query = query.filter(table.c.label == str(query_params.get('label')))\n return query",
"def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all players\n\t\tqueryset = Player.objects.all()\n\n\t\t# Access the request params\n\t\tplayername = self.request.query_params.get('playername', None)\n\n\t\t# If a club name is specified ---> Set the filter\n\t\tif playername is not None:\n\t\t\tqueryset = queryset.filter(player=playername)\n\n\t\treturn queryset",
"def all_artists(our_data):\n return [album['artist'] for album in our_data]",
"def _query_artist_by_song(self, entities: List[str]):\n artists = []\n for e in entities:\n artists += [\n song.get('artist_name')\n for song\n in self.kb_api.get_song_data(e)\n ]\n\n return artists",
"def get_artist_tracks(\n self,\n artist: str,\n from_date: str = None,\n to_date: str = None,\n page: int = 1,\n ) -> ListModel[Track]:\n return self.retrieve(\n bind=Track,\n flatten=\"track\",\n params=dict(\n method=\"user.getArtistTracks\",\n user=self.name,\n artist=artist,\n startTimestamp=from_date,\n endTimestamp=to_date,\n page=page,\n ),\n )",
"def userlogs(self, start_time=None, end_time=None):\n url = (yield self.get_sitemap())['userlogs'] + '/query?'\n if start_time is None:\n start_time = time.strftime('%Y-%m-%d 00:00:00')\n if end_time is None:\n end_time = time.strftime('%Y-%m-%d 23:59:59')\n request_params = {\n 'start_time': start_time,\n 'end_time': end_time\n }\n query_string = urlencode(request_params)\n response = yield self.authorized_fetch(\n url='{}{}'.format(url, query_string), auth_token=self._session_id)\n raise tornado.gen.Return(json.loads(response.body))",
"def artists():\n # TODO: replace with real data returned from querying the database (DONE)\n artists = Artist.query.group_by(Artist.id, Artist.name).all()\n\n data = []\n\n for a in artists :\n data.append({\n 'id' : a.id,\n 'name' : a.name\n })\n\n return render_template(\"pages/artists.html\", artists=data)",
"def get_queryset(self):\n queryset = Snippet.objects.all()\n username = self.request.query_params.get('username', None)\n userqueryset = User.objects.all()\n users = userqueryset.filter(username=username)\n if users.count() != 0 and username is not None:\n queryset = queryset.filter(owner=users[0])\n return queryset\n return []",
"def get_artists_alpha(session_):\n # artists = session_.query(Artist).order_by(Artist.name.desc()).paginate()\n artists = session_.query(Artist).order_by(Artist.name.asc()).all()\n return artists",
"def get_shows(self, comparison):\n results = []\n\n for show in db.session.query(\n Show.artist_id.label('artist_id'),\n Artist.name.label('artist_name'),\n Artist.image_link.label('artist_image_link'),\n func.to_char(Show.start_time, 'YYYY-MM-DD HH24:MI:SS').label('start_time')\n ).filter(\n Show.artist_id == Artist.id,\n Show.venue_id == self.id\n ).filter(\n comparison\n ).all():\n\n results.append({\n 'artist_id': show.artist_id,\n 'artist_name': show.artist_name,\n 'artist_image_link': show.artist_image_link,\n 'start_time': show.start_time\n })\n return results",
"def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all players\n\t\tqueryset = Player.objects.all()\n\n\t\t# Access the request params\n\t\tplayername = self.request.query_params.get('playername', None)\n\n\t\t# If a player name is specified ---> Set the filter\n\t\tif playername is not None:\n\t\t\tqueryset = queryset.filter(player=playername)\n\n\t\t# Return the appropriate queryset\n\t\treturn queryset",
"def artists(self, artists, **kwargs):\n artist_list = map(self._get_artist_id, artists)\n return self._get(API.ARTISTS.value, ids=\",\".join(artist_list), **kwargs)",
"def current_user_top_artists(\n self, limit=20, offset=0, time_range=TimeRange.MEDIUM_TERM, **kwargs\n ):\n return self._get(\n API.MY_TOP.value.format(type=\"artists\"), # pylint: disable=no-member\n time_range=TimeRange(time_range).value,\n limit=limit,\n offset=offset,\n **kwargs,\n )",
"def GetMovies(self, start=0, end=0, sortmethod='title', sortorder='ascending', hidewatched=0, filter=''):\n self.logger.debug(\"Fetching Movies\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties = ['title', 'year', 'plot', 'thumbnail', 'file', 'fanart', 'studio', 'trailer',\n 'imdbnumber', 'genre', 'rating', 'playcount']\n limits = {'start': int(start), 'end': int(end)}\n filter = {'field': 'title', 'operator': 'contains', 'value': filter}\n if hidewatched == \"1\":\n filter = {\"and\" : [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}\n return xbmc.VideoLibrary.GetMovies(sort=sort, properties=properties, limits=limits, filter=filter)\n except:\n self.logger.error(\"Unable to fetch movies!\")\n return",
"def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all sentences\n\t\tqueryset = Score.objects.all()\n\n\t\t# Access the request params\n\t\tplayername = self.request.query_params.get('playername', None)\n\n\t\t# If a player name is specified ---> Set the filter\n\t\tif playername is not None:\n\t\t\tqueryset = queryset.filter(player_name=playername)\n\n\t\t# Return the appropriate queryset\n\t\treturn queryset",
"def get_artist_list():\n return list(dmla.list_artists())"
]
| [
"0.65713817",
"0.62819767",
"0.6086399",
"0.5897939",
"0.5707841",
"0.55241287",
"0.5524061",
"0.5481272",
"0.5450652",
"0.535093",
"0.5305764",
"0.52990735",
"0.52561194",
"0.5250819",
"0.5247014",
"0.5241949",
"0.52326745",
"0.521952",
"0.5169513",
"0.51664984",
"0.5152122",
"0.51153773",
"0.5107061",
"0.5105723",
"0.5093968",
"0.50631225",
"0.50615877",
"0.50399745",
"0.50351155",
"0.5008904"
]
| 0.646841 | 1 |
Find factors of num, in increasing order. >>> find_factors(10) [1, 2, 5, 10] >>> find_factors(11) [1, 11] >>> find_factors(111) [1, 3, 37, 111] >>> find_factors(321421) [1, 293, 1097, 321421] | def find_factors(num):
factors = set()
i = 1
while i*i <= num:
if num % i == 0:
factors.add(i)
factors.add(num // i)
i += 1
factors = list(factors)
factors.sort()
return factors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_factors(num):\n factors = []\n\n # Extend range by 1 to include num\n for i in range(1, num+1):\n if num % i == 0:\n factors.append(i)\n return factors",
"def factors(num):\n\tif is_prime(num) == True:\n\t\tfactors = [1, num]\n\t\treturn factors\n\telse:\n\t\tfactors = [1]\n\t\tsquare_root = int(math.ceil(math.sqrt(num)))\n\t\t\n\t\tfor n in range(2, square_root+1):\n\t\t\tif num % n == 0:\n\t\t\t\tfactors.append(n)\n\n\t\tfor n in range(1, len(factors)):\n\t\t\tnew_n = num / factors[n]\n\t\t\tif new_n not in factors:\n\t\t\t\tfactors.append(num / factors[n])\n\n\t\tfactors.append(num)\n\t\treturn factors",
"def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors",
"def find_factors(number):\n \n i = 2\n prod = 1\n factors = []\n sqrt = math.sqrt(number)\n num = number\n \n while i < num:\n div = check_divisbility(number, i)\n if div == 'divisible':\n factors.append(i)\n number /= i\n prod *= i\n recurse = find_factors(number)\n \n #I recurse here because it prevents us wasting time playing with large numbers\n for fac in recurse:\n factors.append(fac)\n number /= fac\n prod *= fac\n #stop if we find a factor greater tha sqrt(number)\n if i >= sqrt:\n break\n #make sure we're not looking once we find all the factors \n if prod == num:\n break\n else:\n if i> sqrt:\n if len(factors)==0:\n factors.append(num)\n prod *= num\n else: \n print i\n recurse = find_factors(number)\n for fac in recurse:\n factors.append(fac)\n prod *= fac\n if prod == num:\n break\n i = i+1\n if prod != num:\n raise ValueError (\"This isn't right\")\n return factors",
"def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)",
"def factors(number):\n\n if not (isinstance(number, int)):\n raise TypeError(\n \"Incorrect number type provided. Only integers are accepted.\")\n\n factors = []\n for i in range(1, number + 1):\n if number % i == 0:\n factors.append(i)\n return factors",
"def get_unique_factors(num):\n a = num\n m = int(num ** 0.5) if num > 100 else num\n factors = []\n primes = sieve(m)\n # Divide the number by compatible primes until it is 1\n # (or we run out of primes...)\n for p in primes:\n if a % p == 0:\n a = a / p\n factors.append(p)\n if a == 1:\n break\n return factors",
"def factorize(num):\n factors = []\n while num not in primes_list:\n for prime in primes_list:\n if num % prime == 0:\n factors.append(prime)\n num /= prime\n break\n factors.append(num)\n factors = sorted(factors)\n return factors",
"def factor(number):\n\tdividing_primes = sieve(number/2 + 1)\n\tfactors = []\n\t\n\twhile number != 1:\t\n\t\tif not dividing_primes:\n\t\t\treturn [number]\n\n\t\tnext_divisor = min(dividing_primes)\n\n\t\tif not number % next_divisor:\n\t\t\tfactors.append(next_divisor)\n\t\t\tnumber /= next_divisor\n\t\telse:\n\t\t\tdividing_primes.remove(next_divisor)\n\n\treturn factors",
"def factors(n):\n _factors = []\n p = 1\n\n # Loop until half of n\n while p <= n // 2:\n p += 1\n if div_by(p, _factors):\n continue\n if not n % p:\n _factors.append(p)\n\n # Number given is a prime\n if not _factors:\n _factors.append(n)\n\n return _factors",
"def prime_factors(number):\n factors = []\n\n if number == 0 : return factors\n\n # first round factors by two\n while number % 2 == 0:\n factors.append(2)\n number /= 2\n\n # other rounds goes by odd numbers only (no other even is prime)\n divisor = 3\n while divisor <= number:\n while number % divisor == 0:\n factors.append(divisor)\n number /= divisor\n divisor += 2\n\n return factors",
"def prime_factors(num):\n prime_factors = []\n for i in range(2, num + 1):\n if (num % i) == 0 and is_prime(i) == True:\n prime_factors.append(i)\n return prime_factors",
"def factor(cls, number):\n factors = []\n for prime in cls():\n if prime > number:\n break\n # print 'Checking to see if %d is a factor of %d' % (prime, number)\n # reduce the total iterations\n if prime > math.sqrt(number):\n factors.append(number)\n break\n while not number % prime:\n number /= prime\n factors.append(prime)\n return factors",
"def primefactors(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def factors(n):\n nfactors = 0 # the number of factors of n\n for divisor in range(1, n+1): # divisors: {1,2,3,4...,n}\n if n%divisor == 0: # divides with no remainder\n nfactors += 1 # i.e. one new factor found\n return nfactors",
"def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors",
"def factorize(num: int) -> [int, ]:\n # assert isinstance(num, int)\n primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29,\n 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,\n 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,\n 127, 131, 137, 139, 149, 151, 157, 163, 167, 173,\n 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,\n 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,\n 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,\n 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,\n 467, 479, 487, 491, 499, 503, 509, 521, 523, 541)\n factors = []\n if num == 0:\n return [0, ]\n\n # Generate a list of prime factors:\n for prime in primes:\n if prime > num:\n break\n while num % prime == 0:\n factors.append(prime)\n num = int(round(num / prime))\n if num != 1:\n # TODO: make it find larger primes to avoid this problem.\n raise ArithmeticError(\n f'num is {num}. did not finish prime factorization.')\n return factors",
"def factors(n):\n factors = []\n for x in range(1, int(sqrt(n)+1)):\n if (n % x) == 0:\n factors += [x, n/x]\n \n return sorted(set(factors))",
"def factors(n):\n\tif n<0: n=-n # Only deal with positive integers\n\tif (is_prime(n)):\n\t\treturn [n]\n\tfact = factorone(n)\n\tif ((abs(n) == 1) or (n == 0)): raise ValueError('Unable to factor \\\"{0}\\\"'.format(n))\n\tfacts = factors(n//fact) + factors(fact)\n\tfacts.sort()\n\treturn facts",
"def factors(n):\r\n\tif n<0: n=-n # Only deal with positive integers\r\n\tif (is_prime(n)):\r\n\t\treturn [n]\r\n\tfact = factorone(n)\r\n\tif (fact == 1): return \"Unable to factor \"+str(n) # Can't deal with units\r\n\tfacts = factors(n/fact) + factors(fact)\r\n\tfacts.sort()\r\n\treturn facts",
"def prime_factors(n):\r\n factors = defaultdict(int)\r\n d = 2\r\n while n > 1:\r\n while n % d == 0:\r\n factors[d]+=1\r\n n /= d\r\n d = d + 1\r\n if d*d > n:\r\n if n > 1: factors[n]+=1\r\n break\r\n return factors",
"def prime_factors(n) -> []:\n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors",
"def get_prime_factors(num: int, prime_list: list = None) -> list:\n upper_bound = math.ceil(num / 2) + 1\n if not prime_list:\n prime_list = [prime for prime in primes.Primes(upper_bound)]\n\n prime_factors = []\n for prime in prime_list:\n temp = num\n multiplicity = 0\n temp, remainder = divmod(temp, prime)\n while remainder == 0 and temp >= 1:\n multiplicity += 1\n temp, remainder = divmod(temp, prime)\n if multiplicity > 0:\n prime_factors.append((prime, multiplicity))\n if prime > upper_bound:\n break\n\n if not prime_factors:\n prime_factors = [(num, 1)]\n\n return prime_factors",
"def factors(n):\n f = list(reduce(list.__add__, ([i, n // i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)))\n return sorted(f)",
"def prime_factors(num):\n result = []\n for i in range(2, num):\n if (is_prime(i)) and (num % i == 0):\n result.append(i)\n if not result:\n print(\"No prime factors\")\n else:\n return result",
"def get_prime_factors(number):\n if number == 1:\n return []\n\n # We have to begin with 2 instead of 1 or 0\n # to avoid the calls infinite or the division by 0\n for i in range(2, number):\n # Get remainder and quotient\n rd, qt = divmod(number, i)\n if not qt: # if equal to zero\n return [i] + get_prime_factors(rd)\n\n return [number]",
"def prime_factors(n):\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def get_factors(val):\n N = np.sqrt(val)\n N = np.floor(N)\n M = val/N\n\n while (val % N != 0):\n N = N-1\n M = val/N\n\n return int(M), int(N)",
"def primeFactors(number):\n factorlist=[]\n loop=2\n while loop<=number:\n if number%loop==0:\n number/=loop\n factorlist.append(loop)\n else: \n loop+=1\n return factorlist",
"def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))"
]
| [
"0.8587653",
"0.81951445",
"0.81831396",
"0.8080491",
"0.7941649",
"0.7896844",
"0.7802897",
"0.77760303",
"0.7773974",
"0.76784503",
"0.7663789",
"0.7661249",
"0.7620093",
"0.750173",
"0.741434",
"0.7364422",
"0.7358033",
"0.7344944",
"0.73159957",
"0.7259934",
"0.7245798",
"0.72433805",
"0.7239518",
"0.72171575",
"0.7198296",
"0.71703637",
"0.7167016",
"0.7131341",
"0.71305496",
"0.7129737"
]
| 0.8437094 | 1 |
Process the keywords for the given component | def get_keywords_for_component(component, user_defined_keywords):
output_keywords = []
input_keywords = list(user_defined_keywords) # copy the user defined keywords so the caller's list is not mutated
input_keywords += component.split('/') # split the component if there are multiple terms involved
for input_keyword in input_keywords:
output_keywords.append(input_keyword)
word_list_split_by_space = input_keyword.split(' ')
for word in extract_words_from_word_list_split_by_space(word_list_split_by_space):
output_keywords.append(word)
output_keywords += get_synonyms(word)
output_keywords = list(set(output_keywords))
return output_keywords | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content",
"def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results",
"def enrichKeywords(self, result):\n\n # TODO: Implement function\n pass",
"def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []",
"def populate_keywords(kwds, pkg_id):\n if not kwds:\n return\n for word in kwds:\n # @todo(Check data and use the special character-list\n # variable in the constants' file.)\n word = word.strip(\".:;=-,\\\"'\\n $_%{}()[]^*?& +#`\").lower()\n if len(word) <= 1 or (word in constants.STOP_WORDS) or \\\n has_special_chars(word):\n continue\n insert_keyword(word, pkg_id)",
"def process_slide(self, slide, index, file_path):\n keyword_text = None\n\n for shape in slide.shapes:\n if hasattr(shape, \"text\") and shape.text.startswith(\"Keyword\"):\n keyword_text = shape.text.split(':', 1)[-1]\n break\n\n if keyword_text is None:\n return\n\n slide_keyword_list = self.remove_empty_strings(keyword_text.split(','))\n\n for i in range(len(slide_keyword_list)):\n slide_keyword_list[i] = slide_keyword_list[i].strip(' ').replace('\\n', '')\n\n for keyword in slide_keyword_list:\n self.process_keyword(keyword, index, file_path)",
"def iteritems(self, keyword):",
"def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return",
"def get_meta_keywords(self, article):\r\n return self.get_meta_content(article.doc, \"meta[name=keywords]\")",
"def process_article(sentences: List[Dict[str, str]],\n article: str,\n keyword: str,\n collect_all: bool\n ) -> List[Dict[str, str]]:\n with open(article, 'r') as txt:\n for line in txt.read().split('\\n'):\n if collect_all or keyword.lower() in line.lower():\n sentences.append({\n \"sentence\": line,\n \"keyword\": keyword\n })\n \n return sentences",
"def setKeywords(self,value):\n self.PDFreactorConfiguration.in1[\"keywords\"] = value",
"def getmetakeywords(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(\"[, ]+\", allcontent[i])\n if words[0] == \"Meta\":\n for j in range(3, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))",
"def __keyword_clicked(self, checkbox):\n\n\t\tif self.clearing: return\n\n\t\tself.StatusMessage(\"Applying keywords, please wait…\")\n\t\tself.window.set_sensitive(False)\n\n\t\tif checkbox.get_inconsistent():\n\t\t\t# add keywords to all files at first click.\n\t\t\tcheckbox.set_inconsistent(False)\n\t\t\tcheckbox.set_active(True)\n\n\t\tkw = checkbox.get_label()\n\t\trec = self.recursive.get_active()\n\n\t\tlogging.progress('__keyword_clicked(%s) called.' % kw)\n\n\t\tif checkbox.get_active():\n\t\t\tfunc = self.kw.AddKeywordsToPath\n\t\t\tmessage = \"Added keyword %s to %s.\"\n\t\t\tstatus = self.checked\n\t\telse:\n\t\t\tfunc = self.kw.DeleteKeywordsFromPath\n\t\t\tmessage = \"Removed keyword %s from %s.\"\n\t\t\tstatus = self.unchecked\n\n\t\tfor (path, ) in self.liststore:\n\t\t\ttry:\n\t\t\t\tfunc(path, [ kw ], rec)\n\t\t\t\tself.keyword_usage[kw] = status\n\t\t\t\tlogging.progress(message % (kw, path))\n\t\t\t\tself.hc.UpdateRequest(path)\n\n\t\t\texcept (OSError, IOError), e:\n\t\t\t\tif e.errno not in (61, 95):\n\t\t\t\t\t# TODO: refactor messagebox.\n\t\t\t\t\tmsgbox = gtk.MessageDialog(parent=self.window, type=gtk.MESSAGE_ERROR,\n\t\t\t\t\t\tbuttons=gtk.BUTTONS_CLOSE, message_format=str(e))\n\t\t\t\t\tmsgbox.set_modal(True)\n\t\t\t\t\tmsgbox.show()\n\n\t\tself.window.set_sensitive(True)\n\t\tself.StatusMessage(\"Done applying keywords.\")",
"def parse_process_keywords(proc_kw, rfd_kwargs=None):\n\n rfd_kwargs = rfd_kwargs if rfd_kwargs is not None else dict()\n\n if proc_kw in ('fast', 'f', 'quick', 'short', None):\n if 'n_samples' not in rfd_kwargs:\n rfd_kwargs['n_samples'] = 600\n elif proc_kw in ('medium', 'm'):\n if 'n_samples' not in rfd_kwargs:\n rfd_kwargs['n_samples'] = 1000\n elif proc_kw in ('slow', 'full', 'f', 's'):\n if 'n_samples' not in rfd_kwargs:\n rfd_kwargs['n_samples'] = None\n elif proc_kw in ('i', 'ideal', 'transit_search', 'ts', 'main'):\n if 'n_samples' not in rfd_kwargs:\n rfd_kwargs['n_samples'] = 1000\n if 'ocut' not in rfd_kwargs:\n rfd_kwargs['ocut'] = (5, 5, 4, 4, 4)\n rfd_kwargs['n_iters'] = len(rfd_kwargs['ocut'])\n elif proc_kw in ('ie', 'ideal_e', 'main_e', 'semi_evolve'):\n if 'n_samples' not in rfd_kwargs:\n rfd_kwargs['n_samples'] = 1000\n if 'ocut' not in rfd_kwargs:\n rfd_kwargs['ocut'] = (5, 5, 4, 4, 4)\n if 'evolve' not in rfd_kwargs:\n rfd_kwargs['evolve'] = (True, True, True, False, False)\n if 'full_final' not in rfd_kwargs:\n rfd_kwargs['full_final'] = True\n if 'n_iters' not in rfd_kwargs:\n rfd_kwargs['n_iters'] = len(rfd_kwargs['ocut'])\n elif proc_kw in ('long'):\n if 'n_samples' not in rfd_kwargs:\n rfd_kwargs['n_samples'] = None\n if 'ocut' not in rfd_kwargs:\n rfd_kwargs['ocut'] = (5, 5, 4, 4, 4)\n if 'full_final' not in rfd_kwargs:\n rfd_kwargs['full_final'] = False\n if 'n_iters' not in rfd_kwargs:\n rfd_kwargs['n_iters'] = len(rfd_kwargs['ocut'])\n else:\n raise ValueError(\"proc_kw not recognised: {}\".format(proc_kw))\n\n if 'n_iters' not in rfd_kwargs:\n rfd_kwargs['n_iters'] = 4\n\n return rfd_kwargs",
"def keywords(self, keywords):\n self._keywords = keywords",
"def __iter__(self):\n for keyword in self.meta.findall(CN('meta:keyword')):\n yield keyword.text",
"def processSearchLanguage(dataField):\n\tvalidCommands=[\"FILTER\",\"TYPE\",\"ORDER\",\"SORT\"]\n\trawData=getDataFromWidget(dataField)\n\t#Split the data\n\tsplitData=rawData.split(\"=\")\n\tif len(splitData) > 1:\n\t\tcommand=splitData[0]\n\t\tcommandData=splitData[1]\n\t\tif len(commandData.split()) > 0:\n\t\t\tcommandName=str(command).upper()\n\t\t\tif commandName in validCommands:\n\t\t\t\t#Filter\n\t\t\t\tif commandName == \"FILTER\" or commandName == \"TYPE\":\n\t\t\t\t\tfilterPodListbox(commandData)\n\t\t\t\telif commandName == \"ORDER\" or commandName == \"SORT\":\n\t\t\t\t\torderPodListbox(commandData)",
"def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)",
"def register_keywords():\n resources = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../resources\"))\n logger.info(\"resources=%s\" % resources)\n if not os.path.isdir(resources):\n raise AssertionError(\"Unable to find resources directory! resources=%s\" % resources)\n for filename in glob.glob(os.path.join(resources, \"*.robot\")):\n logger.info(\"looking up keywords in file %s\" % filename)\n try:\n BuiltIn().import_resource(filename)\n keywords = lookup_keywords(filename)\n for keyword in keywords:\n register_run_keyword(filename, keyword, 0)\n except:\n pass",
"def keywords(content_id=None):\n try:\n data = content_keywords(request.content)\n return render({'keywords': data}, template='keywords.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')",
"def test_keyword_extractor(self):\n data = [{\"Header\": \"This is a Header\", \"Paragraph\": \"This is a Paragraph\", \"slide\": 10}]\n keywords = keyword_extractor(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(keywords, data)",
"def _fe_keyword_match(self, sample):\n result = OrderedDict()\n\n for item in self._keywords:\n result[item + \"_kw\"] = 1 if item in sample['fqdn'] else 0\n\n return result",
"def scrap_keywords():\n ParScr = ParallelScraper()\n ParScr.create_and_run_threads()\n return",
"def add_keyword(self,\r\n index,\r\n keywords):\r\n\r\n if isinstance(keywords, str):\r\n keywords = {keywords}\r\n\r\n self.edit(index,\r\n self.get_keys_from_note(index).union(keywords),\r\n self.get_text_from_note(index))",
"def use_keyword_classifier():\n while True:\n utterance = input(\"\\n\\nEnter utterance you want to classify, \\ntype menu or exit to go back:\\n-> \").lower()\n if utterance == \"menu\" or utterance == \"exit\":\n break\n else:\n try:\n label_pred = keyword_classifier(utterance)\n print(\"Prediction: {0}\".format(*label_pred))\n except ValueError:\n print(\"Prediction: {0}\".format(\"null\"))",
"def keywords(self, keywords):\n\n self._keywords = keywords",
"def getMetaKeywords(self, article):\n return self.getMetaContent(article.doc, \"meta[name=keywords]\")",
"def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords",
"def execute_method_for_keyword(self):\n\n kwargs, kw_status = self.get_argument_as_keywords()\n print_info(\"The Arguments passed for the current Step is: '{0}'\".format(kwargs))\n if kw_status:\n # Execute the corresponding method\n method_loader = self.exec_obj.im_class()\n try:\n keyword_result = self.exec_obj(method_loader, **kwargs)\n except Exception as exception:\n trcback = print_exception(exception)\n keyword_result = (\"EXCEPTION\", trcback)\n\n self.data_repository = self.update_data_repository(self.keyword,\n keyword_result,\n self.data_repository)\n return self.data_repository",
"def get_keywords_for_movie(url):\n pass"
]
| [
"0.5812088",
"0.57918507",
"0.55647784",
"0.55573857",
"0.5487196",
"0.54760396",
"0.546415",
"0.5446818",
"0.5295191",
"0.528454",
"0.5282875",
"0.52362025",
"0.5217535",
"0.52160937",
"0.52061707",
"0.5133049",
"0.51226133",
"0.512039",
"0.50877815",
"0.50626683",
"0.5056399",
"0.5037693",
"0.5030529",
"0.50291383",
"0.50009704",
"0.49979296",
"0.4984808",
"0.49617884",
"0.49606556",
"0.49587834"
]
| 0.7079821 | 0 |
Given a list of words, generate all potential combination of words in pairs | def generate_pairs_of_words(word_list):
def pair_words(word_list, i, j, connector):
return word_list[i] + connector + word_list[j]
pairs = []
n = len(word_list)
for i in range(n-1):
for j in range(i+1, n):
pairs.append(pair_words(word_list, i, j, ' '))
pairs.append(pair_words(word_list, j, i, ' '))
pairs.append(pair_words(word_list, i, j, '-'))
pairs.append(pair_words(word_list, j, i, '-'))
pairs.append(pair_words(word_list, i, j, '_'))
pairs.append(pair_words(word_list, j, i, '_'))
pairs.append(pair_words(word_list, i, j, ''))
pairs.append(pair_words(word_list, j, i, ''))
outputs = list(set(pairs)) # remove duplicates
return outputs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def word_combination(wlist:list) -> list :\r\n\r\n if wlist and len(wlist)>1:\r\n return chain(*map(lambda x: combinations(wlist, x), range(1, len(wlist)+1)))\r\n else :\r\n return wlist",
"def get_pairs(terms):\n return itertools.combinations(terms, 2)",
"def find_pairs(words): \n pass",
"def crossword_words(crossword: list) -> list:\n pass",
"def make_pairs(txt):\r\n \r\n lista = []\r\n string = \"\" \r\n count = 0\r\n \r\n if len(txt)%2 != 0 :\r\n count = 1\r\n \r\n for i in range(len(txt)):\r\n \r\n string += txt[i]\r\n count += 1\r\n \r\n if count == 2:\r\n lista.append(string)\r\n string = \"\"\r\n count = 0\r\n \r\n return lista",
"def generate_words(combo,scrabble_words_dict):\n word_set = set()\n for w in itertools.permutations(combo):\n word = ''.join(w)\n if word in scrabble_words_dict:\n word_set.add(word)\n return word_set",
"def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs",
"def get_pairs(word):\r\n pairs = set()\r\n prev_char = word[0]\r\n for char in word[1:]:\r\n pairs.add((prev_char, char))\r\n prev_char = char\r\n return pairs",
"def get_pairs(word):\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs",
"def get_pairs(word):\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs",
"def triples(self):\n\n if len(self.words) < 3:\n return\n\n for i in range(len(self.words) - 2):\n yield (self.words[i], self.words[i+1], self.words[i+2])",
"def alpha_chars_pairs (text):\n alpha_text = list (alpha_chars (text))\n return itertools.combinations (alpha_text)",
"def make_chains(text, text2):\n\n list_of_words = text.split() + text2.split()\n # Build dictionary where key = tuple and value = following word\n word_next_dict = {}\n pos = 0\n #Create word pairs of every combination from within source texts\n for word in list_of_words[0:-2]:\n pair = (list_of_words[pos], list_of_words[pos+1])\n if pair not in word_next_dict:\n word_next_dict[pair] = [list_of_words[pos+2]]\n pos += 1\n else:\n word_next_dict[pair].append(list_of_words[pos+2])\n pos += 1\n return word_next_dict",
"def triples(self):\r\n\r\n if len(self.words) < 3:\r\n return\r\n\r\n for i in range(len(self.words) - 2):\r\n yield (self.words[i], self.words[i + 1], self.words[i + 2])",
"def find_words(text):\n print \"finding combinations\"\n length = len(text)\n n = length - 1\n num_combos = 2 ** (length - 1)\n\n bins = []\n for i in range(num_combos):\n num = bin(i).rsplit('b', 1)[1]\n num_str = num.zfill(n)\n bins.append(num_str)\n\n total_combos = []\n for binary_num in bins:\n combo = []\n for i in range(n):\n if binary_num[i] == '1':\n combo.append(text[i])\n combo.append(',')\n else:\n combo.append(text[i])\n\n combo.append(text[-1])\n combo = ''.join(combo)\n combo = combo.split(',')\n total_combos.append(combo)\n\n return total_combos",
"def get_combinations(text):\n combinations = []\n arr = []\n slen = len(text)\n __find_factor(slen,slen,combinations,arr)\n \n elements = []\n for comb in combinations:\n tmp = [0] + comb\n elements.append([text[tmp[i]:tmp[i]+tmp[i+1]] for i in range(len(tmp)-1)])\n return elements",
"def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_selected_combinations: list[tuple[str, str]] = []\n for i in range(max(len(list_1), len(list_2))):\n all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))\n return all_selected_combinations",
"def lists_combinations(list_1, list_2):\n return [x[0] + ' ' + x[1] for x in itertools.product(list_1, list_2)]",
"def triples(self):\n\t\tif len(self.words) < 3:\n\t\t\treturn\n\t\t\n\t\tfor i in range(len(self.words) - 2):\n\t\t\tyield (self.words[i], self.words[i+1], self.words[i+2])",
"def merge_words(w, z):\n\n list_w = list(w)\n list_z = list(z)\n\n return recursive_build_list(\"\", list_w, list_z)",
"def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)",
"def combinations(s, n):\n return (\"\".join(x) for x in tuples(s,n))",
"def _get_possible_sense_combinations(self, taggable, tagged):\n\t\tprint(\"\\tget possible combinations...\")\n\t\t# first create a list of the already tagged senses and store for each of those one list inside that contains the one single correct sense\n\t\ttagged_sense_keys = [[(token, token.wn_sense_key)] for token in tagged]\n\t\ttaggable_possible_sense_keys = []\n\n\t\t# for each token that has to be tagged now find all possible senses and collect them\n\t\tfor token in taggable:\n\t\t\ttoken_sense_pairs = []\n\t\t\t# for each possible sense of the token add one to the list of that sense\n\t\t\tpossible_senses = self._get_possible_wn_senses_for_token(token)\n\t\t\tfor single_possible_sense in possible_senses:\n\t\t\t\ttoken_sense_pairs.append((token, single_possible_sense))\n\t\t\ttaggable_possible_sense_keys.append(token_sense_pairs)\n\n\t\tcomplete_list_of_tokens = taggable_possible_sense_keys + tagged_sense_keys\n\n\t\tprint(\"\\t\\t...building combinations\")\n\t\t# return a dot product of the lists of possible senses of all tokens\n\t\treturn list_product(*complete_list_of_tokens)",
"def gen_all_strings(word):\n if not word:\n return [\"\"]\n \n all_strings = []\n for string in gen_all_strings(word[1:]):\n for letter_idx in range(len(string) + 1):\n all_strings.append(string[letter_idx:] + word[0] + string[:letter_idx])\n \n return gen_all_strings(word[1:]) + all_strings",
"def combo(my_list, string):\n\n index = 0\n new_list = []\n \n first = list(my_list)\n second = list(string)\n \n for each in first:\n new_list.append((each, second[index]))\n index += 1\n \n return new_list",
"def make_chains(word_list):\n\n chains = {}\n for index in range(0, len(word_list) - 2):\n # only making small chains because I like maximum absurdity\n key = tuple(word_list[index:index + 2])\n if key not in chains:\n chains[key] = [word_list[index + 2]]\n else:\n chains[key].append(word_list[index + 2])\n return chains",
"def get_all_pairs(idioms_set, power_expressions, main_words):\r\n print('getting all pairs...')\r\n all_pairs = [' '.join(pair) for pair in list(itertools.product(power_expressions, main_words)) if ' '.join(pair) not in idioms_set]\r\n list_to_file(all_pairs, 'all_pairs.txt')\r\n print('file all_pairs.txt created')\r\n return all_pairs",
"def proc_sandwich(word, forward_pairs, uni_word, morphs, backward_pairs):\n sandwich_pairs = []\n if uni_word and morphs:\n sandwich_pairs = proc_sw_both_not_empty(forward_pairs, uni_word, morphs, backward_pairs)\n elif uni_word:\n sandwich_pairs = proc_sw_only_word(word, uni_word, backward_pairs)\n elif morphs:\n sandwich_pairs = proc_sw_only_morphs(forward_pairs, morphs, backward_pairs)\n return sandwich_pairs",
"def generate_solutions(possible_words, labels):\r\n return []",
"def gen_all_strings(word):\r\n if len(word) == 0:\r\n return ['']\r\n else:\r\n first = word[0]\r\n rest = gen_all_strings(word[1:])\r\n new = []\r\n for item in rest:\r\n if len(item) > 0:\r\n for pos in range(len(item)):\r\n new.append(item[:pos] + first + item[pos:])\r\n new.append(item + first)\r\n new.append(first)\r\n new.extend(rest)\r\n return new"
]
| [
"0.7723539",
"0.74257666",
"0.7352363",
"0.6786749",
"0.67455274",
"0.6643182",
"0.66158676",
"0.66040707",
"0.6596281",
"0.6596281",
"0.6585903",
"0.6577314",
"0.65148395",
"0.6471836",
"0.6451061",
"0.6432114",
"0.64205796",
"0.64113396",
"0.6296403",
"0.62510425",
"0.62165225",
"0.6193558",
"0.6190284",
"0.61464626",
"0.61378324",
"0.6120472",
"0.61125576",
"0.60994667",
"0.6086236",
"0.6074528"
]
| 0.827604 | 0 |
Check if a path points to a file or directory and if it has read or exec access rights. | def isAccessible(self,path):
if isdir(path):
return access(path, R_OK | X_OK | W_OK)
else:
return access(path, R_OK) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isPathExecutable(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)",
"def is_valid_path(path):\n if not os.path.exists(path):\n raise IOError(\"{path} is not a valid path\".format(path=path))\n if not os.access(path, os.R_OK):\n raise OSError(\"{path} is not a readable path\".format(path=path))",
"def check_path(path, isfile=False, isdir=False):\n \n return os.path.isfile(path) if isfile else os.path.isdir(path)",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def isfile_strict(path):\r\n try:\r\n st = os.stat(path)\r\n except OSError:\r\n err = sys.exc_info()[1]\r\n if err.errno in (errno.EPERM, errno.EACCES):\r\n raise\r\n return False\r\n else:\r\n return stat.S_ISREG(st.st_mode)",
"def _check_path(self, path):\n result = False\n if self._check_dir_exists(path):\n # ok, path is an existing file system object and a directory. But is it also writeable?\n if self._check_access(os.path.abspath(path), os.W_OK):\n # Perfect.\n result = True\n else:\n # hm, the path doesn't exist. but could we create it? let's find the last existing parent...\n parent = os.path.dirname(os.path.abspath(path))\n while not self._check_dir_exists(parent):\n parent = os.path.dirname(parent)\n if self._check_access(os.path.abspath(parent), os.W_OK):\n # good news, we could create the path\n result = True\n return result",
"def test_file(path, mode, exception=RuntimeError, isdir=False):\n what = (\"directory\" if isdir else \"file\")\n if not os.access(path, os.F_OK):\n raise exception(\"Cannot access %s '%s'.\" % (what, path))\n if isdir and not os.path.isdir(path):\n raise exception(\n \"Expected '%s' to be a directory, but it's not.\" % path)\n if (mode & os.R_OK) and not os.access(path, os.R_OK):\n raise exception(\"Cannot read %s '%s'.\" % (what, path))\n if (mode & os.W_OK) and not os.access(path, os.W_OK):\n raise exception(\"Cannot write to %s '%s'.\" % (what, path))\n if (mode & os.X_OK) and not os.access(path, os.X_OK):\n if isdir:\n raise exception(\"Cannot traverse directory '%s':\"\n \" lacks 'x' permission.\" % path)\n else:\n raise exception(\"File '%s' lacks execute ('x') permission.\" % path)\n return True",
"def is_executable(path):\n return (os.path.exists(path) and\n not os.path.isdir(path) and\n os.access(path, os.F_OK | os.X_OK))",
"def is_executable(file_path):\n\n return os.path.isfile(file_path) and os.access(file_path, os.X_OK)",
"def _is_file(value: str) -> bool:\n file_in = os.path.expanduser(value)\n return os.path.isfile(file_in) and os.access(file_in, os.R_OK)",
"def check_path(filename):\n return not bool(checkPath(filename))",
"def _have_permissions(self, location):\n if not os.path.isfile(location):\n return True\n \n stats = os.stat(location)\n # check specifically for write permission\n return bool(stats.st_mode & stat.S_IWUSR)",
"def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success",
"def is_in_path(self):\n exe = self.command.split()[0]\n for try_path in os.environ[\"PATH\"].split(os.pathsep):\n try_path = try_path.strip('\"')\n exe_try = os.path.join(try_path, exe).strip()\n if os.path.isfile(exe_try) and os.access(exe_try, os.X_OK):\n return True\n return False",
"def can_use_symlinks():\n if sys.platform.startswith(\"win\"):\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except Exception:\n return False\n else:\n return True",
"def is_file_readable(local_path, reporter=None):\n if not os.path.exists(local_path):\n if reporter is not None:\n reporter.local_access_error(local_path)\n return False\n elif not os.access(local_path, os.R_OK):\n if reporter is not None:\n reporter.local_permission_error(local_path)\n return False\n return True",
"def _checkPermission(self, module):\r\n permission = []\r\n\r\n for p in sys.path:\r\n path = os.path.join(p, module[0])\r\n\r\n if os.path.isdir(path):\r\n if not os.access(path, os.R_OK | os.X_OK):\r\n permission.append(True)\r\n elif (len(module) > 1 and\r\n any(os.access(os.path.join(path, init), os.F_OK)\r\n for init in ['__init__.py', '__init__.pyc'])):\r\n permission.append(self._checkPermission(module[1:]))\r\n\r\n return bool(permission and all(permission))",
"def is_remote_access_allowed(self, path: str):\n return self.public_path_marker.test(path) or self.is_public(path) and not self.is_private(path)",
"def is_writable(filename): \n return os.access(filename, os.W_OK)",
"def is_exe(fpath):\n\treturn os.path.isfile(fpath) and os.access(fpath, os.X_OK)",
"def are_readable_files(self, fnames):\n for fname in fnames:\n if not os.access(fname, os.R_OK):\n self.cli_parser.error(\"%s doesn't exist or you do \"\n \"not have read permissions to it.\" % fname)",
"def checkExistenceFile(path):\n path = os.path.abspath(path)\n return os.path.isfile(path)",
"def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)",
"def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)",
"def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)",
"def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)",
"def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)",
"def isPath(self,pin,head=\"check path exist\",exit_on_error=False,logmsg=False):\n p = os.path.abspath(self.expandvars(pin))\n if os.path.isdir(p):\n if logmsg:\n logger.info(head + \"\\n --> dir exist: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n return p\n #--- error no such file\n logger.error(head + \"\\n --> no such directory: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n if exit_on_error:\n raise SystemError(self.__MSG_CODE_PATH_NOT_EXIST)\n return False",
"def _islink(path):\n if not os.path.isdir(path):\n return False\n\n if not isinstance(path, str):\n path = str(path)\n\n attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n if attributes == INVALID_FILE_ATTRIBUTES:\n return False\n\n return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0"
]
| [
"0.7121918",
"0.70543575",
"0.68055344",
"0.6785841",
"0.6785841",
"0.67321527",
"0.6600794",
"0.65523005",
"0.65245205",
"0.63908297",
"0.62918913",
"0.6234298",
"0.62216395",
"0.621974",
"0.6214841",
"0.61981165",
"0.6191218",
"0.61509806",
"0.6112042",
"0.60897356",
"0.6070015",
"0.6068522",
"0.6058709",
"0.6039164",
"0.6016507",
"0.6003123",
"0.5962175",
"0.5962175",
"0.5956206",
"0.5933814"
]
| 0.72072667 | 0 |
Add a CPS local file system object to a folder | def addCPSLocalFS(container, id, **kw):
ob = CPSLocalFS(id, **kw)
return CPSBase_adder(container, ob) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put_object(local_path: str, file_name: str, configuration):\n pass",
"def add_file(self, path):\n pass",
"def put_object(self, account, container, object, content):#put a file to server\n \n pass",
"def create_object(self, container_name, local_file, object_name):\n with open(local_file, 'r') as test_file:\n response = self.client.put_object(container_name, object_name, contents=test_file.read())\n return response",
"def add_file(self, path):\n assert self._root_dir.is_parent_of(path)\n self._files.append(path)",
"def put(lpath, spath):\n lpath = os.path.expanduser(lpath)\n abs_path = navigate.get_abs_path(spath)\n parent, name = navigate.split_path(abs_path)\n up_parent, up_name = navigate.split_path(parent)\n file_size = os.stat(lpath).st_size\n if up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + parent + \"' is not a valid directory.\"\n elif db.file_exists(parent, name):\n print \"Error: '\" + spath + \"' already exists.\"\n elif file_size > 2 * 1024 * 1024 * 1024 - ACCOUNT_BUFFER:\n print \"Error: individual files must be 2GB or smaller.\"\n else: \n dbox_path = '/' + name\n access_token = accounts.get_useable_account(file_size)\n client = dropbox.client.DropboxClient(access_token)\n lfile = open(lpath)\n client.put_file(dbox_path, lfile)\n lfile.close()\n db.add_file(access_token, parent, name)",
"def add(self,path):\n path = os.path.abspath(path)\n self.files[path] = None\n return True",
"def create_folder(self, c_path):\n raise NotImplementedError",
"def put_file(self, path, f):\n return self.client._perform_json_upload(\n \"POST\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)),\n \"\", f).json()",
"def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))",
"def addObjectToFolder(self, target, obj):\n\n objSelfUrl = vsdModels.APIBase(**obj.to_struct())\n\n if not objSelfUrl in target.containedObjects:\n target.containedObjects.append(objSelfUrl)\n res = self.putRequest('folders', data=target.to_struct())\n\n target = vsdModels.Folder(**res)\n return target\n\n else:\n return target",
"def put_object_as_file(self, ctx):\n req = ctx.req\n\n virtual_path = urllib_parse.unquote(req.path)\n put_location_req = rpc.put_location_request(virtual_path)\n\n request_etag = req.headers.get(\"ETag\", \"\")\n hasher = hashlib.md5()\n wsgi_input = SnoopingInput(req.environ[\"wsgi.input\"], hasher.update)\n\n # TODO: when the upload size is known (i.e. Content-Length is set),\n # ask for enough locations up front that we can consume the whole\n # request with only one call to RpcPutLocation(s).\n\n # TODO: ask to validate the path a bit better; if we are putting an\n # object at /v1/a/c/kitten.png/whoops.txt (where kitten.png is a\n # file), we should probably catch that before reading any input so\n # that, if the client sent \"Expect: 100-continue\", we can give them\n # an error early.\n\n physical_path_gen = (\n rpc.parse_put_location_response(\n self.rpc_call(ctx, put_location_req))\n for _ in itertools.repeat(None))\n\n error_response = swift_code.check_object_creation(req)\n if error_response:\n return error_response\n\n # Since this upload can be arbitrarily large, we split it across\n # multiple log segments.\n log_segments = []\n i = 0\n while True:\n # First, make sure there's more data to read from the client. No\n # sense allocating log segments and whatnot if we're not going\n # to use them.\n subinput = LimitedInput(wsgi_input, self.max_log_segment_size)\n if not subinput.has_more_to_read:\n break\n\n # Ask ProxyFS for the next log segment we can use\n phys_path = next(physical_path_gen)\n\n # Set up the subrequest with the bare minimum of useful headers.\n # This lets us avoid headers that will break the PUT immediately\n # (ETag), headers that may complicate GETs of this object\n # (X-Static-Large-Object, X-Object-Manifest), things that will\n # break the GET some time in the future (X-Delete-At,\n # X-Delete-After), and things that take up xattr space for no\n # real gain (user metadata).\n subreq = swob.Request.blank(phys_path)\n subreq.method = 'PUT'\n subreq.environ['wsgi.input'] = subinput\n subreq.headers[\"Transfer-Encoding\"] = \"chunked\"\n\n # This ensures that (a) every subrequest has its own unique\n # txid, and (b) a log search for the txid in the response finds\n # all of the subrequests.\n trans_id = req.headers.get('X-Trans-Id')\n if trans_id:\n subreq.headers['X-Trans-Id'] = trans_id + (\"-%03x\" % i)\n\n # Actually put one chunk of the data into Swift\n subresp = subreq.get_response(self.app)\n if not 200 <= subresp.status_int < 299:\n # Something went wrong; may as well bail out now\n return subresp\n\n log_segments.append((phys_path, subinput.bytes_read))\n i += 1\n\n if should_validate_etag(request_etag) and \\\n hasher.hexdigest() != request_etag:\n return swob.HTTPUnprocessableEntity(request=req)\n\n # All the data is now in Swift; we just have to tell proxyfsd\n # about it. 
Mung any passed ETags values to include the\n # number of writes to the file (basically, the object's update\n # count) and supply the MD5 hash computed here which becomes\n # object's future ETag value until the object updated.\n obj_metadata = extract_object_metadata_from_headers(req.headers)\n mung_etags(obj_metadata, hasher.hexdigest(), len(log_segments))\n\n put_complete_req = rpc.put_complete_request(\n virtual_path, log_segments, serialize_metadata(obj_metadata))\n try:\n mtime_ns, inode, __writes = rpc.parse_put_complete_response(\n self.rpc_call(ctx, put_complete_req))\n except utils.RpcError as err:\n # We deliberately don't try to clean up our log segments on\n # failure. ProxyFS is responsible for cleaning up unreferenced\n # log segments.\n if err.errno == pfs_errno.NotEmptyError:\n return swob.HTTPConflict(\n request=req,\n headers={\"Content-Type\": \"text/plain\"},\n body=\"This is a non-empty directory\")\n elif err.errno == pfs_errno.NotDirError:\n return swob.HTTPConflict(\n request=req,\n headers={\"Content-Type\": \"text/plain\"},\n body=\"Path element is a file, not a directory\")\n else:\n # punt to top-level error handler\n raise\n\n # For reference, an object PUT response to plain Swift looks like:\n # HTTP/1.1 201 Created\n # Last-Modified: Thu, 08 Dec 2016 22:51:13 GMT\n # Content-Length: 0\n # Etag: 9303a8d23189779e71f347032d633327\n # Content-Type: text/html; charset=UTF-8\n # X-Trans-Id: tx7b3e2b88df2f4975a5476-005849e3e0dfw1\n # Date: Thu, 08 Dec 2016 22:51:12 GMT\n #\n # We get Content-Length, X-Trans-Id, and Date for free, but we need\n # to fill in the rest.\n resp_headers = {\n \"Etag\": hasher.hexdigest(),\n \"Content-Type\": guess_content_type(req.path, False),\n \"Last-Modified\": last_modified_from_epoch_ns(mtime_ns)}\n return swob.HTTPCreated(request=req, headers=resp_headers, body=\"\")",
"def new(self, obj):\n FileStorage.__objects[obj.__class__.__name__ + \".\" + obj.id] = obj",
"def add(self, path):\n \n try:\n self._client.add(self._workingCopyPath + path, recurse=True)\n except ClientError, error:\n raise SubversionError(error)",
"def test_add_and_read_file(self, tmpdir, fs):\n with fs.add_file('my-dir', 'my-file') as f:\n f.write('hello')\n assert fs.read('my-dir', 'my-file') == 'hello'",
"def addfile(self, abspath, sourcetree=pbxconsts.SOURCE_TREE.group, move=True):\n fileref = self.project().fileref_for_path(abspath)\n if fileref is None:\n fileref = self.project().new_object(u'PBXFileReference')\n pbxpath.set_path_with_source_tree(fileref, abspath, source_tree=sourcetree, \\\n parent_group=self)\n fileref.pbx_lastKnownFileType = pbxhelper.get_filetype(abspath)\n self.addchild(fileref, move=move)\n return fileref",
"def test_local_folder_is_created(self, mock):\r\n mock.save.return_value = True\r\n u = LocalUploader()\r\n u.upload_folder = tempfile.mkdtemp()\r\n file = FileStorage(filename='test.jpg')\r\n container = 'mycontainer'\r\n res = u.upload_file(file, container=container)\r\n path = os.path.join(u.upload_folder, container)\r\n err_msg = \"This local path should exist: %s\" % path\r\n assert os.path.isdir(path) is True, err_msg",
"def add(path):\n print(uc.add(path))",
"def add(self, filetype, **kwargs):\n\n location = self.location(filetype, **kwargs)\n sas_module, location = location.split(sep, 1) if location else (None, location)\n\n # set proper sasdir based on access method\n sasdir = 'sas' if self.access_mode == 'curl' else ''\n source = self.url(filetype, sasdir=sasdir, **kwargs)\n\n # raise error if attempting to add a software product path\n if 'svn.sdss.org' in source:\n raise AccessError('Rsync/Curl Access not allowed for svn paths. Please use HttpAccess.')\n\n if 'full' not in kwargs:\n destination = self.full(filetype, **kwargs)\n else:\n destination = kwargs.get('full')\n\n if sas_module and location and source and destination:\n self.initial_stream.append_task(\n sas_module=sas_module, location=location, source=source, destination=destination)\n else:\n print(\"There is no file with filetype=%r to access in the tree module loaded\" % filetype)",
"def new(self, obj):\n FileStorage.__objects[obj.id] = obj",
"def put_file(container, filepath, content):\n return put_files(container, [(filepath, content)])",
"def add(path: str) -> None:\n wit = WitEditor(path)\n\n if os.path.isfile(wit.real_path):\n rel_path = os.path.relpath(\n os.path.dirname(wit.real_path), wit.parent_wit_dir\n )\n else:\n rel_path = os.path.relpath(wit.real_path, wit.parent_wit_dir)\n\n if rel_path == '.':\n final_dir = wit.stage_dir\n else:\n final_dir = os.path.join(wit.stage_dir, rel_path)\n wit.create_dirs(final_dir)\n\n if os.path.isdir(wit.real_path):\n wit.copy_tree(\n src=wit.real_path, dst=wit.stage_dir, rel=wit.parent_wit_dir\n )\n else:\n shutil.copy2(wit.real_path, final_dir)\n _logger.info('%s has been added to the stage backup', wit.real_path)",
"def add_file(self, path):\n self.files.append(filetypes.WrapVideoFile(path))",
"def new(self, obj):\n\n FileStorage.__objects[key(type(obj), obj.id)] = obj",
"def new(self, obj):\n if obj is not None:\n key = type(obj).__name__ + \".\" + obj.id\n FileStorage.__objects[key] = obj",
"def create_obj(destination,mtl_name):\r\n\tshutil.copyfile(\"file_cube.obj\",destination)\r\n\tf=open(destination,\"r\")\r\n\tlines=f.readlines()\r\n\tlines[0]=\"mtllib \"+mtl_name+\"\\n\"\r\n\tf.close()\r\n\tf=open(destination,\"w\")\r\n\tf.writelines(lines)\r\n\tf.close()",
"def put_file(*paths, **kwargs):\n local_path = path.join(LOCAL_FILES_DIR, *paths)\n remote_path = posixpath.join(\"/\", *paths)\n sudo(\"mkdir -p %s\" % posixpath.dirname(remote_path))\n put(local_path, remote_path, use_sudo=True, **kwargs)\n sudo(\"chown root %s\" % remote_path)",
"def addFile(self, path):\n self._model.insertFile(path)",
"def add(self):\n self.create(self.fs.name)\n # Mark a volume as 'static' if created from a snapshot\n # Note that if a volume is marked as 'static', it is assumed it\n # can be deleted upon cluster termination!\n if (ServiceRole.GALAXY_DATA not in self.fs.svc_roles and\n (self.from_snapshot_id is not None or self.from_archive is not\n None)):\n log.debug(\"Marked volume '%s' from file system '%s' as 'static'\" %\n (self.volume_id, self.fs.name))\n # FIXME: This is a major problem - any new volumes added from a snapshot\n # will be assumed 'static'. This is OK before being able to add an\n # arbitrary volume as a file system but is no good any more. The\n # problem is in automatically detecting volumes that are supposed\n # to be static and are being added automatically at startup\n if self.from_archive:\n self.fs.kind = 'volume' # Treated as a regular volume after initial extraction\n else:\n self.static = True\n self.fs.kind = 'snapshot'\n else:\n self.fs.kind = 'volume'\n if self.attach():\n us = os.path.join(self.app.path_resolver.galaxy_data, 'upload_store')\n misc.remove(us)\n log.debug(\"Volume attached, mounting {0}\".format(self.fs.mount_point))\n self.mount(self.fs.mount_point)",
"def upload_file( processor, user, local_path ):\n operations.publish_work_item(\n operations.create_asset_from_file(\n file_name = local_path,\n owner = user,\n producer = processor,\n child_number = 0,\n asset_class = models.AssetClass.UPLOAD ))"
]
| [
"0.6798031",
"0.63411236",
"0.62318945",
"0.6201793",
"0.6044068",
"0.59653485",
"0.5933506",
"0.58449936",
"0.5841136",
"0.58149403",
"0.5778442",
"0.57666314",
"0.5732546",
"0.57216054",
"0.57189363",
"0.5698509",
"0.5671398",
"0.56620854",
"0.5660115",
"0.5656925",
"0.56358874",
"0.56327164",
"0.5592363",
"0.5576887",
"0.557037",
"0.5567463",
"0.5566833",
"0.55622643",
"0.5559051",
"0.5546259"
]
| 0.6432148 | 1 |
build a Homebrew formula file for lrosecore | def build_lrose_formula(tar_url, tar_name, formula_name):
dash = tar_name.find('-')
period = tar_name.find('.', dash)
version = tar_name[dash+1:period]
checksum = subprocess.check_output(("sha256sum", tar_name))
checksum = checksum.split()[0]
formula = template.format(tar_url, version, checksum)
outf = open(formula_name, 'w')
outf.write(formula)
outf.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_homebrew_formula(archive_url: str, head_url: str) -> str:\n repository_root = Path(__file__).parent.parent\n direct_requires = _get_dependencies(\n requirements_file=repository_root / 'requirements.txt',\n )\n indirect_requires = _get_dependencies(\n requirements_file=repository_root / 'indirect-requirements.txt',\n )\n\n requirements = direct_requires + indirect_requires\n\n first = requirements[0]\n\n args = ['poet', first]\n for requirement in requirements[1:]:\n args.append('--also')\n args.append(requirement)\n\n result = subprocess.run(args=args, stdout=subprocess.PIPE, check=True)\n resource_stanzas = str(result.stdout.decode())\n\n pattern = dedent(\n \"\"\"\\\n class Dcose2e < Formula\n include Language::Python::Virtualenv\n\n url \"{archive_url}\"\n head \"{head_url}\"\n homepage \"http://dcos-e2e.readthedocs.io/en/latest/cli.html\"\n depends_on \"python3\"\n depends_on \"pkg-config\"\n\n {resource_stanzas}\n\n def install\n virtualenv_install_with_resources\n end\n\n test do\n ENV[\"LC_ALL\"] = \"en_US.utf-8\"\n ENV[\"LANG\"] = \"en_US.utf-8\"\n system \"#{{bin}}/dcos_docker\", \"--help\"\n end\n end\n \"\"\",\n )\n\n return pattern.format(\n resource_stanzas=resource_stanzas,\n archive_url=archive_url,\n head_url=head_url,\n )",
"def mac_add():\n brew_installed = shutil.which('brew')\n if not brew_installed:\n print('Installing brew')\n url = 'https://raw.githubusercontent.com/Homebrew/install/master/install'\n command_line = \"/usr/bin/ruby -e \\\"$(curl -fsSL %s)\\\"\" % url\n subprocess.check_call(command_line, shell=True)\n else:\n print('brew present')\n gmsh_installed = shutil.which('gmsh')\n if not gmsh_installed:\n print('Installing gmsh')\n folder_path = os.path.dirname(os.path.abspath(__file__))\n dmginstall_path = os.path.join(folder_path, 'dmginstall.sh')\n url = 'http://gmsh.info/bin/MacOSX/gmsh-3.0.5-MacOSX.dmg'\n command_line = '%s %s' % (dmginstall_path, url)\n print('command_line=%s' % command_line)\n subprocess.check_call(command_line, shell=True)\n gmsh_path = '/Applications/Gmsh.app/Contents/MacOS/gmsh'\n command_line = \"ln -s %s /usr/local/bin/gmsh\" % gmsh_path\n subprocess.check_call(command_line, shell=True)\n else:\n print('gmsh present')\n ccx_installed = shutil.which('ccx')\n if not ccx_installed:\n mac_add_ccx()\n else:\n print('calculix (ccx) present')",
"def maint_brew():\n os.system('brew update')",
"def make_readme(digest):\n o = 'SHA1 digest: %s\\n\\n'%digest[:10]\n print '...build readme file for GitHub' \n open('README.md','w').write(o + make_readme.__doc__)",
"def main():\n\n options = get_options()\n\n cf.use_style(\"solarized\")\n if options[\"nocolor\"]:\n cf.disable()\n\n newline()\n header(\"Thumbor v%s (of %s)\" % (__version__, __release_date__))\n\n newline()\n print(\n \"Thumbor doctor will analyze your install and verify if everything is working as expected.\"\n )\n\n errors = check_modules()\n errors += check_compiled_extensions()\n errors += check_filters()\n errors += check_extensions()\n\n newline()\n\n if errors:\n print(cf.bold_red(\"😞 Oh no! We found some things that could improve... 😞\"))\n newline()\n print(\"\\n\".join([\"* %s\" % str(err) for err in errors]))\n newline()\n newline()\n print(\n cf.cyan(\n \"If you don't know how to fix them, please open an issue with thumbor.\"\n )\n )\n print(\n cf.cyan(\n \"Don't forget to copy this log and add it to the description of your issue.\"\n )\n )\n print(\"Open an issue at https://github.com/thumbor/thumbor/issues/new\")\n sys.exit(1)\n return\n\n print(cf.bold_green(\"🎉 Congratulations! No errors found! 🎉\"))",
"def cmd_makecldf(self, args):\n wl = lingpy.Wordlist(self.raw_dir.joinpath(\"GEM-CNL.csv\").as_posix())\n concepts = args.writer.add_concepts(\n id_factory=lambda x: x.id.split(\"-\")[-1] + \"_\" + slug(x.english), lookup_factory=\"Name\"\n )\n for concept in self.conceptlists[0].concepts.values():\n for cis in concept.attributes[\"lexibank_gloss\"]:\n if cis not in concepts:\n concepts[cis] = concepts[concept.english]\n\n languages = args.writer.add_languages(lookup_factory=\"STEDT_Name\")\n args.writer.add_sources()\n\n for idx, language, concept, value, pos in wl.iter_rows(\n \"doculect\", \"concept\", \"reflex\", \"gfn\"\n ):\n # Fix for 251479\n if concept == \"top (i.e. highest point\":\n concept = \"top (i.e. highest point)\"\n\n if concept not in concepts:\n args.log.warning(concept)\n else:\n args.writer.add_forms_from_value(\n Language_ID=languages[language],\n Parameter_ID=concepts[concept],\n Value=value,\n Source=[\"Marrison1967\"],\n )",
"def main(base_path):\n current = os.getcwd()\n try:\n if not(os.path.exists(base_path)):\n ans = 'y'\n if p_out:\n print(\"Do you want to create \" + base_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n pass\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n else:\n m_path = os.path.join(base_path, 'nzmath/manual')\n if os.path.exists(m_path):\n ans = 'y'\n if p_out:\n print(\"Do you want to remove \" + m_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n for root, dirs, files in os.walk(m_path, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n dirname = os.path.join(base_path, 'nzmath/manual/modules')\n if not(os.path.exists(dirname)):\n os.makedirs(dirname)\n os.chdir(os.path.join(base_path, 'nzmath/manual/'))\n csspage = convertHPURL('manual/default.css')\n if p_out:\n print(\"get css from \" + csspage)\n retryConnection(urllib.request.urlretrieve, csspage, 'default.css')\n while ad_list:\n files = ad_list.pop()\n MyWikiParser(files).feeds()\n if p_out:\n print(\"\\n\" + \"All process is done!\" + \"\\n\")\n print(\"Ok, now created nzmath-current manual located to\")\n print(os.path.join(base_path, \"nzmath\"))\n print(\"if you check difference between nzmath-cvs manual, with GNU diff,\")\n print(\"$ diff -ubBr /tmp/nzmath/manual {your-nzmathcvs-repo}/manual\")\n print(\"or you check only new version files,\")\n print(\"$ diff -r --brief /tmp/nzmath/manual {your-nzmathcvs-repo}/manual .\")\n except NoneOutput:\n if p_out:\n print('end.')\n except InputError:\n print(\"Error: Invalid input!\")\n except LookupError:\n print(\"Error: Maybe, Japanese encodings(ex.euc_jp) is not supported.\")\n except:\n if p_out:\n print(\"Check \" + base_path + \" (dir? truly path? and so on.)\")\n print(\"Delete \" + base_path + \" and try again.\")\n print(\"(Maybe, caused by problem of network connection)\\n\")\n print(sys.exc_info()[0])\n os.chdir(current)",
"def main():\n\n util.protontricks('mf_install')",
"def make_user_database():\n createblast_out, createblast_error = Popen([\"makeblastdb\", \"-in\", args.blast_database, \"-dbtype\", \"nucl\"], stdout=PIPE, stderr=PIPE).communicate()\n admin_log(createblast_out, createblast_error, \"create database:\")",
"def about():\n# return about string\n about = (\"\"\"\nJADM %s\n-------------\ncreator: Nikolay Georgiev Dachev, <[email protected]>\nsupport: [email protected] (only for bugs report and jadm issues)\n\nJadm is FreeBSD jail administration framework with jail.conf, vnet and zfs support.\n\n---------------- JADM is BSD 3-Clause Licensed ---------------------\n\nCopyright (c) <2014>, <Nikolay Georgiev Dachev> <[email protected]>\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distr\n\n3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\nINCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\nIN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\nOR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\") % (jadm_version)\n return about",
"def build_latex_standalone(x):\n eingabe=[]\n anhaenge_file=[]\n eingabe.append(\"\\\\title{%s}\" %(x[2]))\n eingabe.append(\"\\\\author{%s}\" %(x[1]))\n eingabe.append(\"\\\\date{%s}\" %(x[0]))\n eingabe.append(\"\\maketitle\") \n eingabe.append(\"\\section{Infos}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}lX}\")\n eingabe.append(r\"\\textbf{Anatrag/Beschluss wurde} & %s\\\\\" %(x[9]))\n x[11]=x[11].replace(\" \",\"\")\n kw=x[11].split(\",\")\n for i in range(0,len(kw)):\n if i==0:\n eingabe.append(r\"\\textbf{Keyword:} & %s\\\\\" %(kw[i]))\n else:\n eingabe.append(r\" & %s\\\\\" %(kw[i]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[6],x[7],x[8]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\section{Antrags/Beschlusstext}\")\n eingabe.append(x[3])\n eingabe.append(\"\\section{Begründung}\")\n eingabe.append(x[4])\n if x[23]==\"Ja\" and x[24]!=\"\":\n delta=7\n anzahl=int((len(x)-23)/delta)\n if anzahl==1:\n eingabe.append(\"\\section{Änderungsantrag}\")\n eingabe.append(\"\\subsection*{Vorschlag}\")\n eingabe.append(x[24])\n eingabe.append(\"\\subsection*{Begründung}\")\n eingabe.append(x[25]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26],x[27],x[28]))\n eingabe.append(r\"\\multicolumn{@{}2}{l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29]))\n eingabe.append(\"\\end{tabularx}\")\n else:\n eingabe.append(\"\\section{Änderungsanträge}\")\n for i in range(0,anzahl):\n eingabe.append(\"\\subsection{Änderungsvorschlag %s}\" %(i+1))\n eingabe.append(\"\\subsubsection*{Vorschlag}\")\n eingabe.append(x[24+(delta*i)])\n eingabe.append(\"\\subsubsection*{Begründung}\")\n eingabe.append(x[25+(delta*i)]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26+(delta*i)],x[27+(delta*i)],x[28+(delta*i)]))\n eingabe.append(r\"\\multicolumn{@{}2}{l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29+(delta*i)]))\n eingabe.append(\"\\end{tabularx}\")\n if x[10]!=\"\":\n #\\includepdf[pages=-]{Anhang/Geschaeftsordnung_Jugendausschuss.pdf}\n eingabe.append(\"\\\\appendix\")\n eingabe.append(\"\\section*{Anhang}\")\n anhang=x[10].split(\",\")\n bennenung=x[11].split(\",\")\n eingabe[14]=eingabe[14]+\"\\\\\\\\ \\n Dieser Antrag enthält %s Anhänge: \" %(len(anhang))\n for i in range(0,len(anhang)):\n eingabe.append(\"\\subsection*{%s} \\label{An:%s}\" % (bennenung[i],str(i+1)))\n eingabe.append(\"\\includepdf[pages=-]{%s}\" %(anhang[i]))\n anhaenge_file.append(anhang[i])\n if i!=len(anhang)-1:\n eingabe[14]=eingabe[14]+\"\\\\nameref{An:%s}, \" % (str(i+1))\n else:\n eingabe[14]=eingabe[14]+\"\\\\nameref{An:%s} \" % (str(i+1))\n \n\n \n ausgabe=\"\"\n for i in range(0,len(eingabe)):\n ausgabe=ausgabe+eingabe[i]+\"\\n\"\n \n return ausgabe,anhaenge_file",
"def create_readme(histfile, vb):\n\tme = \"Utils.create_readme: \"\n\treadmefile = os.path.dirname(histfile)+\"/README.txt\"\n\ttry:\n\t\tassert os.path.isfile(readmefile)\n\texcept AssertionError:\n\t\tnow = str(datetime.now().strftime(\"%Y-%m-%d %H.%M\"))\n\t\tcommit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])\n\t\theader = \"Time:\\t\"+now+\"\\nCommit hash:\\t\"+commit+\"\\n\\n\"\n\t\twith open(readmefile,\"w\") as f:\n\t\t\tf.write(header)\n\t\tif vb: print me+\"Created readme file \"+readmefile\n\treturn",
"def frame(text,\n preamble_file='magyarpreambulum',\n pagesize='a4paper',\n fontsize=11,\n lhead='-',\n rhead='-',\n lfoot='-',\n rfoot='-',\n cfoot=None,\n definitions=None,\n doc_type='testpaper',\n ):\n\n preamble_text = []\n preamble_text.append(_('''% The value of \\\\doctype can be: testpaper, plain és draft.\\n'''))\n preamble_text.append('\\\\newcommand{\\doctype}{%s}\\n' % doc_type)\n\n preamble_text.append('''\n\\\\usepackage{ucs}\n\\\\usepackage[utf8x]{inputenc}\n\\\\newcommand{\\groupname}{}\n\\n''')\n\n if wide:\n preamble_text.append(r\"\"\"\\voffset -2.2cm\n\\textheight 25.4cm\n\\hoffset -1.2cm\n\\textwidth 17.5cm\n\\headheight -3.cm\n%\\footheight 1.5cm\n\\oddsidemargin 3mm\n\\topskip 7mm\n\\parskip 3mm\n\\parindent 1cm\n\\baselineskip 6mm\n\"\"\")\n\n preamble_text.append(r\"\"\"\n%% FEJ és LÁBLÉC\n%%%%%%%%%%%%%%%%%%%%%\n\\usepackage{fancyhdr}\n\\pagestyle{fancy}\n\"\"\")\n\n if cfoot:\n preamble_text.append(\"\\\\cfoot{%s}\\n\" % cfoot)\n else:\n preamble_text.append(\"\\\\cfoot{\\\\groupname}\\n\")\n preamble_text.append(\"\\n\")\n\n if preamble_file:\n preamble_text.append(\"\\\\input{%s}\\n\" % preamble_file)\n preamble_text.append('\\n')\n\n if definitions:\n if definitions[-1] == \"\\n\":\n definitions = definitions[:-1]\n preamble_text.append(\"%begin{definitions}\\n\")\n preamble_text.extend(definitions)\n preamble_text.append(\"%end{definitions}\\n\")\n preamble_text.append(\"\\n\")\n\n preamble_text.append(r'''\\input{formats}\n\n% Itt esetleg állíthatod a többi logikai változót. (def_hu.tex)\n\n\\input{commands_hu}''')\n\n if lhead:\n preamble_text.append(\"\\\\lhead{%s}\\n\" % lhead)\n if rhead:\n preamble_text.append(\"\\\\rhead{%s}\\n\" % rhead)\n if lfoot:\n preamble_text.append(\"\\\\lfoot{%s}\\n\" % lfoot)\n if rfoot:\n preamble_text.append(\"\\\\rfoot{%s}\\n\" % rfoot)\n\n if fontsize not in [10, 11, 12]:\n if type == 'list':\n fontsize = 10\n else:\n fontsize = 11\n\n return general_frame(text,\n class_argument='[%s, %dpt]' % (pagesize, fontsize),\n preamble_text=preamble_text)",
"def main():\n init_latex()",
"def install(i):\n\n cm_kernel.print_for_con('***********************************************')\n cm_kernel.print_for_con('Installing code ...')\n\n # Check vars\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code install\"'}\n\n # Create entry\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'update'}\n if 'install_data_uid' in i and i['install_data_uid']!='': \n ii['cm_data_uid']=i['install_data_uid']\n if 'install_data_alias' in i and i['install_data_alias']!='': \n ii['cm_data_uoa']=i['install_data_alias']\n if 'install_data_display_as_alias' in i: \n ii['cm_display_as_alias']=i['install_data_display_as_alias']\n if 'install_module_uoa' in i and i['install_module_uoa']!='':\n ii['cm_run_module_uoa']=i['install_module_uoa']\n if 'cm_array' in i and len(i['cm_array'])>0: ii['cm_array']=i['cm_array']\n if 'install_repo_uoa' in i and i['install_repo_uoa']!='': \n ii['cm_repo_uoa']=i['install_repo_uoa']\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_path=r['cm_path']\n target_uid=r['cm_uid']\n target_alias=r['cm_alias']\n\n # Prepare script\n rx=get_env({'cm_data_uoa':target_uid,\n 'os_uoa':i['target_os_uoa']})\n if rx['cm_return']>0: return rx\n\n script=rx['cm_string']\n\n ii={'script_name':script,\n 'skip_extension':'yes',\n 'target_os_uoa':i['target_os_uoa'],\n 'cm_path':target_path}\n if 'code_deps' in i and i.get('skip_code_deps','')!='yes':\n ii['code_deps']=i['code_deps']\n\n # Add remark about how code was built\n if 'add_rem_to_script' in i:\n run_commands_before=[]\n run_commands_before.append('')\n for x in i['add_rem_to_script']:\n run_commands_before.append(x)\n ii['run_commands_before']=run_commands_before\n\n rx=prepare_script(ii)\n if rx['cm_return']>0: return rx\n\n r['script_name']=rx['cm_path']\n r['script_filename']=script\n\n return r",
"def install_step(self):\n\n# if LooseVersion(self.version) < LooseVersion('2012-10-05'):\n\tif (False):\n self.inchworm()\n self.chrysalis()\n self.kmer()\n self.butterfly()\n\n bwapluginver = self.cfg['bwapluginver']\n if bwapluginver:\n self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)\n\n if self.cfg['RSEMmod']:\n self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))\n\n else:\n self.jellyfish()\n\n inchworm_flags = self.inchworm(run=False)\n chrysalis_flags = self.chrysalis(run=False)\n\n cc = os.getenv('CC')\n cxx = os.getenv('CXX')\n\n lib_flags = \"\"\n for lib in ['ncurses', 'zlib']:\n libroot = get_software_root(lib)\n if libroot:\n lib_flags += \" -L%s/lib\" % libroot\n\n fn = \"Makefile\"\n for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):\n\n line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\\s*=\\s*).*$', r'\\1%s' % inchworm_flags, line)\n line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\\s*=\\s*).*$', r'\\1%s' % chrysalis_flags, line)\n line = re.sub(r'(/rsem && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=%s CXX=\"%s %s\" CFLAGS_EXTRA=\"%s\"\\n' % (cc, cxx, lib_flags, lib_flags), line)\n line = re.sub(r'(/fastool && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=\"%s -std=c99\" CFLAGS=\"%s ${CFLAGS}\"\\n' % (cc, lib_flags), line)\n\n sys.stdout.write(line)\n\n trinity_compiler = None\n comp_fam = self.toolchain.comp_family()\n if comp_fam in [toolchain.INTELCOMP]:\n trinity_compiler = \"intel\"\n elif comp_fam in [toolchain.GCC]:\n trinity_compiler = \"gcc\"\n else:\n self.log.error(\"Don't know how to set TRINITY_COMPILER for %s compiler\" % comp_fam)\n\n cmd = \"make TRINITY_COMPILER=%s\" % trinity_compiler\n run_cmd(cmd)\n\n # butterfly is not included in standard build\n self.butterfly()\n\n # remove sample data if desired\n if not self.cfg['withsampledata']:\n try:\n shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))\n except OSError, err:\n self.log.error(\"Failed to remove sample data: %s\" % err)",
"def test_man6ext(self):\n self.chck_triple('man6ext')",
"def brew(self):\n pass",
"def usage(fd):\n fd.write('Usage:\\n')\n exe = os.path.basename(sys.argv[0])\n fd.write(\" %s <formula file> <sha> <os/arch> <root url>\\n\" % exe)\n fd.write('\\n')\n fd.write('For example:\\n')\n fd.write(\n \" %s /some/dir/gcc.rb 123... tiger_g3 https://some.server/bottles\\n\" \\\n % exe\n )",
"def setup_prereqs():\n # Run the contrib download script -- easier that way\n gmp = os.path.join(flag_gcc_subdir, \"gmp\")\n if not os.path.exists(gmp):\n dochdir(flag_gcc_subdir)\n docmd(\"./contrib/download_prerequisites\")\n # Hack -- fix up gmp dir\n patch_gmp_configure()\n dochdir(\"..\")",
"def build_htmlhome(sample, htmldir, htmlhome='index.html', pixscale=0.262,\n racolumn='RA', deccolumn='DEC', #diamcolumn='GROUP_DIAMETER',\n maketrends=False, fix_permissions=True):\n import legacyhalos.html\n \n htmlhomefile = os.path.join(htmldir, htmlhome)\n print('Building {}'.format(htmlhomefile))\n\n js = legacyhalos.html.html_javadate() \n\n # group by RA slices\n #raslices = np.array([get_raslice(ra) for ra in sample[racolumn]])\n #rasorted = raslices)\n\n with open(htmlhomefile, 'w') as html:\n html.write('<html><body>\\n')\n html.write('<style type=\"text/css\">\\n')\n html.write('table, td, th {padding: 5px; text-align: center; border: 1px solid black;}\\n')\n html.write('</style>\\n')\n\n html.write('<h1>MaNGA-NSF</h1>\\n')\n html.write('<p style=\"width: 75%\">\\n')\n html.write(\"\"\"Multiwavelength analysis of the MaNGA sample.</p>\\n\"\"\")\n \n if maketrends:\n html.write('<p>\\n')\n html.write('<a href=\"{}\">Sample Trends</a><br />\\n'.format(trendshtml))\n html.write('<a href=\"https://github.com/moustakas/legacyhalos\">Code and documentation</a>\\n')\n html.write('</p>\\n')\n\n # The default is to organize the sample by RA slice, but support both options here.\n if False:\n html.write('<p>The web-page visualizations are organized by one-degree slices of right ascension.</p><br />\\n')\n\n html.write('<table>\\n')\n html.write('<tr><th>RA Slice</th><th>Number of Galaxies</th></tr>\\n')\n for raslice in sorted(set(raslices)):\n inslice = np.where(raslice == raslices)[0]\n html.write('<tr><td><a href=\"RA{0}.html\"><h3>{0}</h3></a></td><td>{1}</td></tr>\\n'.format(raslice, len(inslice)))\n html.write('</table>\\n')\n else:\n html.write('<br /><br />\\n')\n html.write('<table>\\n')\n html.write('<tr>\\n')\n html.write('<th> </th>\\n')\n #html.write('<th>Index</th>\\n')\n html.write('<th>Galaxy</th>\\n')\n html.write('<th>RA</th>\\n')\n html.write('<th>Dec</th>\\n')\n html.write('<th>Redshift</th>\\n')\n html.write('<th>Viewer</th>\\n')\n html.write('</tr>\\n')\n \n galaxy, galaxydir, htmlgalaxydir = get_galaxy_galaxydir(sample, html=True)\n for gal, galaxy1, htmlgalaxydir1 in zip(sample, np.atleast_1d(galaxy), np.atleast_1d(htmlgalaxydir)):\n\n htmlfile1 = os.path.join(htmlgalaxydir1.replace(htmldir, '')[1:], '{}.html'.format(galaxy1))\n pngfile1 = os.path.join(htmlgalaxydir1.replace(htmldir, '')[1:], '{}-custom-montage-grz.png'.format(galaxy1))\n thumbfile1 = os.path.join(htmlgalaxydir1.replace(htmldir, '')[1:], 'thumb2-{}-custom-montage-grz.png'.format(galaxy1))\n\n ra1, dec1, diam1 = gal[racolumn], gal[deccolumn], 5 * MOSAICRADIUS / pixscale\n viewer_link = legacyhalos.html.viewer_link(ra1, dec1, diam1, dr10=True)\n\n html.write('<tr>\\n')\n html.write('<td><a href=\"{0}\"><img src=\"{1}\" height=\"auto\" width=\"100%\"></a></td>\\n'.format(pngfile1, thumbfile1))\n #html.write('<td>{}</td>\\n'.format(gal['INDEX']))\n html.write('<td><a href=\"{}\">{}</a></td>\\n'.format(htmlfile1, galaxy1))\n html.write('<td>{:.7f}</td>\\n'.format(ra1))\n html.write('<td>{:.7f}</td>\\n'.format(dec1))\n html.write('<td>{:.5f}</td>\\n'.format(gal[ZCOLUMN]))\n html.write('<td><a href=\"{}\" target=\"_blank\">Link</a></td>\\n'.format(viewer_link))\n html.write('</tr>\\n')\n html.write('</table>\\n')\n \n # close up shop\n html.write('<br /><br />\\n')\n html.write('<b><i>Last updated {}</b></i>\\n'.format(js))\n html.write('</html></body>\\n')\n\n if fix_permissions:\n shutil.chown(htmlhomefile, group='cosmo')",
"def create_umap(name):\n\tglobal dir\n\tdirec = dir + \"/\" + name + \"/\"\n\tos.chdir(direc + \"representations/\")\n\t\n\t# Palette size of 2x50 required. 1-49 for labeled nat data, 51-100 for labeled syn data, 50 for unlabeled nat data\n\tpalette = sns.color_palette(\"Blues_d\", 30)# Syn data in blue\n\tpalette.extend(sns.dark_palette(\"purple\", 20)) # Unimportant, just a filler\n\tpalette.extend(sns.color_palette(\"Reds_d\", 30))# Nat data in red\n\tpalette.extend(sns.dark_palette(\"purple\", 20))# Unimportant, just a filler\n\tpalette[49]=\"#50B689\"# Unlabeled nat data in green\n\t# print(\"size of palette \" + str(len(palette)))\n\t\n\tfor file in glob.glob(\"*.pt\"):\n\t\t\trepresentation = torch.load(file)\n\t\t\ttarfile = file[:-3] # Removes the .pt ending\n\t\t\ttarfile = \"tar\" + tarfile[4:] + \".log\"\n\t\t\tall_targets = []\n\t\t\twith open(tarfile, \"r\") as f:\n\t\t\t\tfor tar in f:\n\t\t\t\t\tall_targets.append(float(tar.strip()))\n\n\t\t\tsns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t\treducer = umap.UMAP()\n\t\t\tembedding = reducer.fit_transform(representation.cpu())\n\t\t\t\n\t\t\tprint(\"scattering\")\n\t\t\t# print(all_targets)\n\t\t\tplt.scatter(embedding[:, 0], embedding[:, 1], c=[palette[int(y-1)] for y in all_targets], alpha=0.8)\n\t\t\tplt.gca().set_aspect('equal', 'datalim')\n\t\t\tplt.title('UMAP projection of cell data', fontsize=24);\n\t\t\tplt.savefig(\"./umap_\" + str(file[4:-3]) + \".png\")\n\t\t\tplt.clf()\n\tos.chdir(\"../../../../\")",
"def build_apple(arch: str, debug: bool = False) -> None:\n import platform\n import subprocess\n from efro.error import CleanError\n\n # IMPORTANT; seems we currently wind up building against /usr/local gettext\n # stuff. Hopefully the maintainer fixes this, but for now I need to\n # remind myself to blow it away while building.\n if 'MacBook-Fro' in platform.node():\n if (subprocess.run('which gettext', shell=True,\n check=False).returncode == 0):\n raise CleanError('NEED TO TEMP-KILL GETTEXT')\n\n builddir = 'build/python_apple_' + arch + ('_debug' if debug else '')\n efrotools.run('rm -rf \"' + builddir + '\"')\n efrotools.run('mkdir -p build')\n efrotools.run('git clone '\n '[email protected]:pybee/Python-Apple-support.git \"' +\n builddir + '\"')\n os.chdir(builddir)\n\n # TEMP: Check out a particular commit while the branch head is broken.\n # We can actually fix this to use the current one, but something\n # broke in the underlying build even on old commits so keeping it\n # locked for now...\n # efrotools.run('git checkout bf1ed73d0d5ff46862ba69dd5eb2ffaeff6f19b6')\n efrotools.run(f'git checkout {PYTHON_VERSION_MAJOR}')\n\n # On mac we currently have to add the _scproxy module or urllib will\n # fail.\n txt = efrotools.readfile('patch/Python/Setup.embedded')\n if arch == 'mac':\n txt += ('\\n'\n '# ericf added - mac urllib needs this\\n'\n '_scproxy _scproxy.c '\n '-framework SystemConfiguration '\n '-framework CoreFoundation')\n\n # Turn off sqlite module. (scratch that; leaving it in.)\n # txt = efrotools.replace_one(txt, '_sqlite3 -I$(', '#_sqlite3 -I$(')\n # txt = txt.replace(' _sqlite/', '# _sqlite/')\n\n # Turn off xz compression module. (scratch that; leaving it in.)\n # txt = efrotools.replace_one(txt, '_lzma _', '#_lzma _')\n\n # Turn off bzip2 module.\n txt = efrotools.replace_one(txt, '_bz2 _b', '#_bz2 _b')\n\n # Turn off openssl module (only if not doing openssl).\n if not ENABLE_OPENSSL:\n txt = efrotools.replace_one(txt, '_hashlib _hashopenssl.c',\n '#_hashlib _hashopenssl.c')\n\n # Turn off various other stuff we don't use.\n for line in [\n '_codecs _codecsmodule.c',\n '_codecs_cn cjkcodecs/_codecs_cn.c',\n '_codecs_hk cjkcodecs/_codecs_hk.c',\n '_codecs_iso2022 cjkcodecs/',\n '_codecs_jp cjkcodecs/_codecs_jp.c',\n '_codecs_jp cjkcodecs/_codecs_jp.c',\n '_codecs_kr cjkcodecs/_codecs_kr.c',\n '_codecs_tw cjkcodecs/_codecs_tw.c',\n '_lsprof _lsprof.o rotatingtree.c',\n '_multibytecodec cjkcodecs/multibytecodec.c',\n '_multiprocessing _multiprocessing/multiprocessing.c',\n '_opcode _opcode.c',\n 'audioop audioop.c',\n 'grp grpmodule.c',\n 'mmap mmapmodule.c',\n 'parser parsermodule.c',\n 'pyexpat expat/xmlparse.c',\n ' expat/xmlrole.c ',\n ' expat/xmltok.c ',\n ' pyexpat.c ',\n ' -I$(srcdir)/Modules/expat ',\n ' -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI'\n ' -DXML_DEV_URANDOM',\n 'resource resource.c',\n 'syslog syslogmodule.c',\n 'termios termios.c',\n '_ctypes_test _ctypes/_ctypes_test.c',\n '_testbuffer _testbuffer.c',\n '_testimportmultiple _testimportmultiple.c',\n '_crypt _cryptmodule.c', # not on android so disabling here too\n ]:\n txt = efrotools.replace_one(txt, line, '#' + line)\n\n if ENABLE_OPENSSL:\n\n # _md5 and _sha modules are normally only built if the\n # system does not have the OpenSSL libs containing an optimized\n # version.\n # Note: seems we still need sha3 or we get errors\n for line in [\n '_md5 md5module.c',\n '_sha1 sha1module.c',\n # '_sha3 _sha3/sha3module.c',\n '_sha256 sha256module.c',\n '_sha512 sha512module.c',\n ]:\n txt = 
efrotools.replace_one(txt, line, '#' + line)\n else:\n txt = efrotools.replace_one(txt, '_ssl _ssl.c', '#_ssl _ssl.c')\n efrotools.writefile('patch/Python/Setup.embedded', txt)\n\n txt = efrotools.readfile('Makefile')\n\n # Fix a bug where spaces in PATH cause errors (darn you vmware fusion!)\n txt = efrotools.replace_one(\n txt, '&& PATH=$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH) .',\n '&& PATH=\"$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH)\" .')\n\n # Remove makefile dependencies so we don't build the\n # libs we're not using.\n srctxt = '$$(PYTHON_DIR-$1)/dist/lib/libpython$(PYTHON_VER)m.a: '\n txt = efrotools.replace_one(\n txt, srctxt, '$$(PYTHON_DIR-$1)/dist/lib/libpython$(PYTHON_VER)m.a: ' +\n ('build/$2/Support/OpenSSL ' if ENABLE_OPENSSL else '') +\n 'build/$2/Support/XZ $$(PYTHON_DIR-$1)/Makefile\\n#' + srctxt)\n srctxt = ('dist/Python-$(PYTHON_VER)-$1-support.'\n '$(BUILD_NUMBER).tar.gz: ')\n txt = efrotools.replace_one(\n txt, srctxt,\n 'dist/Python-$(PYTHON_VER)-$1-support.$(BUILD_NUMBER).tar.gz:'\n ' $$(PYTHON_FRAMEWORK-$1)\\n#' + srctxt)\n\n # Turn doc strings on; looks like it only adds a few hundred k.\n txt = txt.replace('--without-doc-strings', '--with-doc-strings')\n\n # Set mac/ios version reqs\n # (see issue with utimensat and futimens).\n txt = efrotools.replace_one(txt, 'MACOSX_DEPLOYMENT_TARGET=10.8',\n 'MACOSX_DEPLOYMENT_TARGET=10.14')\n # And equivalent iOS (11+).\n txt = efrotools.replace_one(txt, 'CFLAGS-iOS=-mios-version-min=8.0',\n 'CFLAGS-iOS=-mios-version-min=12.0')\n # Ditto for tvOS.\n txt = efrotools.replace_one(txt, 'CFLAGS-tvOS=-mtvos-version-min=9.0',\n 'CFLAGS-tvOS=-mtvos-version-min=12.0')\n\n if debug:\n\n # Add debug build flag\n # (Currently expect to find 2 instances of this).\n dline = '--with-doc-strings --enable-ipv6 --without-ensurepip'\n splitlen = len(txt.split(dline))\n if splitlen != 3:\n raise Exception('unexpected configure lines')\n txt = txt.replace(dline, '--with-pydebug ' + dline)\n\n # Debug has a different name.\n # (Currently expect to replace 12 instances of this).\n dline = 'python$(PYTHON_VER)m'\n splitlen = len(txt.split(dline))\n if splitlen != 13:\n raise RuntimeError(f'Unexpected configure line count {splitlen}.')\n txt = txt.replace(dline, 'python$(PYTHON_VER)dm')\n\n efrotools.writefile('Makefile', txt)\n\n # Ok; let 'er rip.\n # (we run these in parallel so limit to 1 job a piece;\n # otherwise they inherit the -j12 or whatever from the top level)\n # (also this build seems to fail with multiple threads)\n efrotools.run('make -j1 ' + {\n 'mac': 'Python-macOS',\n 'ios': 'Python-iOS',\n 'tvos': 'Python-tvOS'\n }[arch])\n print('python build complete! (apple/' + arch + ')')",
"def make_readme_txt(self, args):\n with open(self.readme_txt, 'w') as writer:\n log.info(\"args=%s\\n\", args)\n writer.write(\"# Created by pbtranscript-internal-validation.ValidationRunner.make_readme_txt()\\n\")\n writer.write(\"args=%s\\n\\n\" % args)\n\n files = self.common_files + self.collapse_human_files + self.reseq_human_files + self.sirv_files\n for desc, fn in files:\n if op.exists(fn):\n writer.write(\"%s=%s\\n\" % (desc, fn))",
"def usage():\n print(\"Usage: python3 c_header_md5.py <dir>\")",
"def doc_begin(fdoc):\n fdoc.write(\n '\\\\documentclass{{hyperiondoc}}\\n'\n '\\n'\n '\\\\usepackage{{adjustbox}}\\n'\n '\\\\usepackage{{{0}}}\\n'\n '\\\\newfontfamily{{\\\\symbola}}{{Symbola}}\\n'\n '\\\\USymbolAllStyle{{\\\\symbola}}\\n'\n '\\n'\n '\\\\newcommand{{\\\\symboldemo}}[3]{{%\\n'\n ' \\\\noindent\\\\begin{{minipage}}[c]{{.1\\\\textwidth}}\\n'\n ' \\\\centering\\\\textlarger[2]{{#3}}\\n'\n ' \\\\end{{minipage}}%\\n'\n ' \\\\begin{{minipage}}{{.8\\\\textwidth}}\\n'\n ' $\\\\mathtt{{0x#1}}$\\\\\\\\[-0.4em]\\n'\n ' \\\\adjustbox{{max width=.9\\\\textwidth}}{{\\\\code{{\\\\bs #2}}}}\\n'\n ' \\\\end{{minipage}}\\\\\\\\[0.6em]\\n'\n '}}\\n'\n '\\n'\n '\\\\begin{{document}}\\n'\n .format(package_name('all'))\n )",
"def convert_corpusdiplomaticum():\n local('cd import_scripts;../bin/python import_corpusdiplomaticum.py import')",
"def main():\n\n dofile = \"thebook\"\n\n #spellcheck()\n\n common_options = '--encoding=utf-8 --examples_as_exercises '\n\n # --- HTML ---\n\n common_html_options = ' '\n\n # HTML Bootstrap\n bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --toc_depth=2 --pygments_html_style=default --html_template=template_bootstrap_wtoc.html --html_figure_caption=bottom --html_figure_hrule=top+bottom' \n\n html(\n dofile,\n options=common_options + common_html_options + bootstrap_options,\n split=True)\n\n # One long HTML file\n #html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)\n\n # Solarized HTML\n #html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)\n\n mksnippets()\n sys.exit(1)\n\n # --- latex ---\n\n common_latex_options = ' --latex_code_style=vrb'\n\n for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':\n latex(\n dofile,\n latex_program='pdflatex',\n options=common_options + common_latex_options,\n version=version,\n postfix='auto')\n\n # --- Sphinx ---\n\n# sphinx_themes = ['pyramid',]\n# for theme in sphinx_themes:\n# dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme\n# sphinx(\n# dofile,\n# options=common_options + '',\n# dirname=dirname,\n# theme=theme,\n# automake_sphinx_options='',\n# split=False)\n\n # Dump all Unix commands run above as a Bash script\n bash = open('tmp_make.sh', 'w')\n print 'see tmp_make.sh for an equivalent auto-generated unix script'\n bash.write('''#!/bin/bash\nset -x # display all commands in output\n\n# Safe execution of a Unix command: exit if failure\nfunction system {\n \"$@\"\n if [ $? -ne 0 ]; then\n echo \"make.sh: unsuccessful command $@\"\n echo \"abort!\"\n exit 1\n fi\n}\n''')\n for cmd in unix_command_recorder:\n if cmd.startswith('doconce format') or cmd.startswith('rm '):\n bash.write('\\n') # delimiter line in script\n bash.write('system ' + cmd + '\\n')\n bash.close()\n\n print 'see tmp_output.log for the output of all the commands'",
"def home() :\n st.markdown(\"This application provides 3 mains modules :\")\n st.markdown(\"* **The prediction module :** enables you to assess client's liability based on its file\")\n st.markdown(\"* **The explorer module :** enables you to dig deeper into your client informations,\"\n \" particularly historcial data coming from federal loan bureau and historical\"\n \" Home Credit's data if available.\")\n st.markdown(\"* **The statistics module** : enables you to explore the database at a macro scale :\"\n \" understand how variables such as age, sex and income impact probability of repayment\")",
"def _install():\n download_file='http://www.ipol.im/pub/art/2015/136/inpaint_8.tgz'\n tools.download_and_extract(download_file) \n this_file_path=os.path.dirname(__file__)\n subprocess.call(' mkdir build; cd build; cmake ..; make', shell=True,cwd=exec_folder)"
]
| [
"0.59271795",
"0.5161424",
"0.51153964",
"0.50794196",
"0.49461532",
"0.48992947",
"0.48970073",
"0.4855891",
"0.4850761",
"0.4779826",
"0.47689286",
"0.47110814",
"0.4706798",
"0.46762416",
"0.4674272",
"0.46457046",
"0.4642398",
"0.46235317",
"0.46116403",
"0.45985612",
"0.45930472",
"0.45910084",
"0.45896816",
"0.45490688",
"0.4537403",
"0.45360243",
"0.4528842",
"0.45247266",
"0.4516938",
"0.4516726"
]
| 0.53471965 | 1 |
Updates our local poorly cached representation of a Vault server. This runs in a thread, one per Vault server. Probably needs more mutex. | def update_server(finished, server):
name = server['name']
while not finished.wait(2):
new_s = fetch_server(VAULTZ[name], server)
if 'cluster_id' in new_s:
my_cluster = [x['name']
for _name, x
in iteritems(SERVERZ)
if x.get('cluster_id', None) == new_s['cluster_id']]
new_s['cluster_members'] = my_cluster
SERVERZ[name] = new_s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_server_thread(server):\n client = None\n name = server['name']\n if name in VAULTZ:\n client = VAULTZ[name]\n else:\n client = VAULTZ[name] = get_vault(server)\n\n if name in SERVERZ:\n return SERVERZ[name]\n\n server = SERVERZ[name] = fetch_server(client, server)\n sthread = threading.Thread(target=update_server,\n args=(FINISHED, server,))\n VTHREADZ.append(sthread)\n sthread.setDaemon(True)\n sthread.start()\n return server",
"def run(self):\n try:\n # Sleep for the first two seconds after which the cache is loaded,\r\n # so that we don't interfere with the user interface startup.\r\n Thread.sleep(2000)\n except InterruptedException as e:\n e.printStackTrace()\n return\n remoteRepository = RemoteGameRepository(self.theRepoURL)\n print \"Updating the game cache...\"\n beginTime = System.currentTimeMillis()\n # Since games are immutable, we can guarantee that the games listed\r\n # by the repository server includes the games in the local cache, so\r\n # we can be happy just updating/refreshing the listed games.\r\n theGameKeys = remoteRepository.getGameKeys()\n if theGameKeys == None:\n return\n # If the server offers a single combined metadata file, download that\r\n # and use it to avoid checking games that haven't gotten new versions.\r\n bundledMetadata = remoteRepository.getBundledMetadata()\n if bundledMetadata != None:\n for theKey in theGameKeys:\n try:\n if myGameVersion == None:\n continue \n # Skip updating the game cache entry if the version is the same\r\n # and the cache entry was written less than a week ago.\r\n if myGameVersion.getRepositoryURL() == remoteVersionedGameURL and getCacheEntryAge(theKey) < 604800000:\n unchangedKeys.add(theKey)\n except Exception as e:\n continue \n theGameKeys.removeAll(unchangedKeys)\n # Start threads to update every entry in the cache (or at least verify\r\n # that the entry doesn't need to be updated).\r\n theThreads = HashSet()\n for gameKey in theGameKeys:\n t.start()\n theThreads.add(t)\n # Wait until we've updated the cache before continuing.\r\n for t in theThreads:\n try:\n t.join()\n except InterruptedException as e:\n endTime = System.currentTimeMillis()\n print \"Updating the game cache took: \" + (endTime - beginTime) + \"ms.\"",
"def _update(self, host):\n pass",
"def UpdateFromServer(self):\n self.status = GetUserStatus(self.accesskey)",
"def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)",
"def update_servers(self, req, closest_serv):\n\n\t\t# Request is a put --> Key, value insert in the cache of the closest server\n\t\tif req.type == 0:\n\t\t\t# Iterate through all servers\n\t\t\tfor serv in self.servers:\n\t\t\t\tif serv.ip != closest_serv.ip: # If Server has not been updated\n\t\t\t\t\tserv.cache.put(req.key, req.value) # Update server cache\n\n\t\t# Request is a get --> Value retrived from the cache of the closest server\n\t\telif req.type == 1:\n\t\t\t# Iterate through all servers\n\t\t\tfor serv in self.servers:\n\t\t\t\tif serv.ip != closest_serv.ip: # If server has not been updated\n\t\t\t\t\tserv.cache.get(req.key)",
"def sync_with_server(self):\n response = self.conn_mng.dispatch_request('get_server_snapshot', '')\n if response is None:\n self.stop(1, '\\nReceived bad snapshot. Server down?\\n')\n\n server_timestamp = response['server_timestamp']\n files = response['files']\n\n sync_commands = self._sync_process(server_timestamp, files)\n self.update_local_dir_state(server_timestamp)\n\n # Initialize the variable where we put the timestamp of the last operation we did\n last_operation_timestamp = None\n\n # makes all synchronization commands\n for command, path in sync_commands:\n if command == 'delete':\n event_timestamp = self.conn_mng.dispatch_request(command, {'filepath': path})\n if event_timestamp:\n print 'event_timestamp di \"delete\" INTO SYNC:', event_timestamp\n last_operation_timestamp = event_timestamp['server_timestamp']\n # If i can't find path inside client_snapshot there is inconsistent problem in client_snapshot!\n if self.client_snapshot.pop(path, 'ERROR') == 'ERROR':\n print 'Error during delete event INTO SYNC! Impossible to find \"{}\" inside client_snapshot'.format(path)\n else:\n self.stop(1, 'Error during connection with the server. Server fail to \"delete\" this file: {}'.format(path))\n\n elif command == 'modified' or command == 'upload':\n event_timestamp = self.conn_mng.dispatch_request(command, {'filepath': path})\n if event_timestamp:\n print 'event_timestamp di \"{}\" INTO SYNC: {}'.format(command, event_timestamp)\n last_operation_timestamp = event_timestamp['server_timestamp']\n else:\n self.stop(1, 'Error during connection with the server. Server fail to \"{}\" this file: {}'.format(command, path))\n\n else: # command == 'download'\n print 'skip di download'\n self.observer.skip(self.absolutize_path(path))\n connection_result = self.conn_mng.dispatch_request(command, {'filepath': path})\n if connection_result:\n print 'Downloaded file with path \"{}\" INTO SYNC'.format(path)\n self.client_snapshot[path] = files[path]\n else:\n self.stop(1, 'Error during connection with the server. Client fail to \"download\" this file: {}'.format(path))\n\n if last_operation_timestamp:\n self.update_local_dir_state(last_operation_timestamp)",
"def run_forever(self, *args, **kwargs):\n try:\n self.logger.debug('Begin account update')\n\n # get account-updater server ownership\n self.get_ownership_obj = threading.Thread(target = self.msg.get_my_ownership)\n self.get_ownership_obj.setDaemon(True)\n self.get_ownership_obj.start()\n\n self.walker_obj = Walker(self.walker_map, self.__param, self.logger)\n self.walker_obj.setDaemon(True)\n self.walker_obj.start()\n self.logger.info(\"Walker Started\")\n self.reader_obj = Reader(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.reader_obj.setDaemon(True)\n self.reader_obj.start() \n self.logger.info(\"Reader Started\")\n self.account_sweeper = AccountSweep(self.__param, self.logger)\n self.account_sweeper.setDaemon(True)\n self.account_sweeper.start()\n self.logger.info(\"Account Sweeper Started\") \n self.updater_obj = Updater(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.updater_obj.setDaemon(True)\n self.updater_obj.start() \n self.logger.info(\"Updater Started\") \n self.container_sweeper = ContainerSweeper(self.walker_map, \\\n self.reader_map, self.__param, self.logger)\n self.container_sweeper.setDaemon(True)\n self.container_sweeper.start()\n self.logger.info(\"Container Sweeper Started\") \n\n account_updater_server = ThreadedAccountUpdaterServer(\\\n (self.__get_node_ip(gethostname()), \\\n self.__account_updater_port), HttpListener)\n account_updater_server.serve_forever()\n except Exception as ex:\n self.logger.error(\"Exception occured: %s\" % ex)",
"def fetch_server(client, server):\n server_obj = {\n \"name\": server['name'],\n \"url\": server['url'],\n \"client\": client\n }\n if (server['name'] in SERVERZ) \\\n and ('cluster_members' in SERVERZ[server['name']]):\n clustered = SERVERZ[server['name']]['cluster_members']\n server_obj['cluster_members'] = clustered\n\n if 'parent' in server:\n server_obj['parent'] = server['parent']\n\n try:\n init = client.is_initialized()\n except hvac.exceptions.InternalServerError:\n return server_obj\n except requests.ConnectionError:\n return server_obj\n except requests.ReadTimeout:\n return server_obj\n\n server_obj['init'] = init\n if init:\n try:\n status = client.seal_status\n except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):\n return server_obj\n\n server_obj['version'] = status['version']\n seal = server_obj['sealed'] = status['sealed']\n if seal:\n server_obj['unseal_required'] = status['t']\n server_obj['unseal_progress'] = status['progress']\n else:\n server_obj['cluster_name'] = status['cluster_name']\n server_obj['cluster_id'] = status['cluster_id']\n\n server_obj['rekey'] = False\n try:\n rekey_obj = client.rekey_status\n rekey = server_obj['rekey'] = rekey_obj['started']\n if rekey:\n server_obj['rekey_backup'] = rekey_obj['backup']\n server_obj['rekey_progress'] = rekey_obj['progress']\n server_obj['rekey_required'] = rekey_obj['required']\n except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):\n pass\n except hvac.exceptions.InternalServerError as vault_exception:\n if vault_exception.message == 'node not active but active '\\\n 'node not found':\n pass\n\n server_obj['ha'] = False\n server_obj['leader'] = False\n try:\n leader_obj = client.read('sys/leader')\n server_obj['ha'] = leader_obj['ha_enabled']\n if leader_obj['ha_enabled']:\n server_obj['leader'] = leader_obj['is_self']\n except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):\n pass\n except hvac.exceptions.InternalServerError as e:\n if e.message == 'node not active but active node not found':\n pass\n\n if not server_obj['ha'] or \\\n (server_obj['ha'] and server_obj['leader']):\n try:\n regen_obj = client.read('sys/generate-root/attempt')\n server_obj['regenerating'] = regen_obj.get('started', False)\n if server_obj['regenerating']:\n server_obj['regen_progress'] = regen_obj['progress']\n server_obj['regen_required'] = regen_obj['required']\n except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):\n pass\n except hvac.exceptions.InternalServerError as e:\n if e.message == 'node not active but ' \\\n 'active node not found':\n pass\n\n client = root_client(server_obj)\n server_obj['is_root'] = False\n if client:\n try:\n key_obj = client.key_status\n server_obj['key_term'] = key_obj['term']\n server_obj['is_root'] = True\n except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):\n pass\n except hvac.exceptions.InternalServerError as e:\n if e.message == 'node not active but active '\\\n 'node not found':\n pass\n\n return server_obj",
"async def updateserver(self, ctx):\n if is_support_guild(ctx.guild.id):\n await ctx.send('Sorry, this discord does not allow update, saveid, '\n 'leaderboard, and series commands so as not to overload me. '\n 'Try `!careerstats` or `!yearlystats` with your customer ID to test '\n 'or go to #invite-link to bring the bot to your discord for all functionality')\n return\n\n await ctx.send(f'Updating server data. This may take a while')\n\n try:\n guild = await Guild.get(discord_id=str(ctx.guild.id))\n await self.updater.update_server_background(guild)\n await ctx.send(f'Server update complete!')\n except:\n await ctx.send('Make sure at least 1 user has set their ID with `!saveid` before calling this command')",
"def update(self, server):\n if server in self.servers:\n self.servers.remove(server)\n self.servers.append(server)\n self.sync()\n return True\n return False",
"def run(self):\n try:\n rcvdConfig = self.conn.recv(self.size)\n if self.verbosity >= 1:\n print('%s: Received request from client %s:%s.' % (self.feederName, self.addr, self.port))\n except socket.error as e:\n raise Error(e.strerror)\n self.lockConfig.acquire()\n self.config.newConfig = json.loads(rcvdConfig)\n self.config.updateConfig()\n self.lockConfig.release()\n self.conn.close()",
"def sync_remote(self, otp_params, local_params, server_nonce, required_answers, timeout=1):\n # Construct URLs\n responses = []\n dqueue = queue.Queue()\n for row in self.db.get_queue(otp_params['modified'], server_nonce):\n url = '%(server)s?otp=%(otp)s&modified=%(modified)s' % row\n url += '&' + row['info'].split(',')[0]\n _thread = threading.Thread(target=self._fetch_remote,\n args=(dqueue, row['server'], url, timeout))\n _thread.daemon = True\n _thread.start()\n loop_start = time.time()\n while len(responses) < required_answers and time.time() < loop_start + timeout * 1.5:\n try:\n resp = dqueue.get(timeout=0.2)\n responses.append(resp)\n # Delete entry from table\n self.db.remove_from_queue(resp['server'], otp_params['modified'], server_nonce)\n except queue.Empty:\n pass\n\n answers = len(responses)\n # Parse response\n valid_answers = 0\n for resp in responses:\n resp_params = resp['params']\n logger.debug('[%s] local DB contains %s',\n otp_params['yk_publicname'], local_params)\n logger.debug('[%s] response contains %s',\n otp_params['yk_publicname'], resp_params)\n logger.debug('[%s] OTP contains %s',\n otp_params['yk_publicname'], otp_params)\n # Update Internal DB (conditional)\n self.db.update_db_counters(resp_params)\n # Check for Warnings\n # https://developers.yubico.com/yubikey-val/doc/ServerReplicationProtocol.html\n # NOTE: We use local_params for validationParams comparison since they are actually\n # the same in this situation and we have them at hand.\n if counters_gt(local_params, resp_params):\n logger.warning('[%(yk_publicname)s] Remote server out of sync', otp_params)\n if counters_gt(resp_params, local_params):\n logger.warning('[%(yk_publicname)s] Local server out of sync', otp_params)\n if counters_eq(resp_params, local_params) \\\n and resp_params['nonce'] != local_params['nonce']:\n logger.warning('[%(yk_publicname)s] Servers out of sync. '\n 'Nonce differs.', otp_params)\n if counters_eq(resp_params, local_params) \\\n and resp_params['modified'] != local_params['modified']:\n logger.warning('[%(yk_publicname)s] Servers out of sync. '\n 'Modified differs.', otp_params)\n if counters_gt(resp_params, otp_params):\n logger.warning('[%(yk_publicname)s] OTP is replayed. '\n 'Sync response counters higher than OTP counters.', otp_params)\n elif counters_eq(resp_params, otp_params) \\\n and resp_params['nonce'] != otp_params['nonce']:\n logger.warning('[%(yk_publicname)s] OTP is replayed. Sync '\n 'response counters equal to OTP counters and nonce '\n 'differs.', otp_params)\n else:\n # The answer is ok since a REPLAY was not indicated\n valid_answers += 1\n if required_answers == valid_answers:\n break\n\n # NULL queued_time for remaining entries in queue, to allow\n # daemon to take care of them as soon as possible.\n self.db.null_queue(server_nonce)\n return {'answers': answers, 'valid_answers': valid_answers}",
"def update(self):\r\n\t\ttry:\r\n\t\t\tself.telnet_server.poll()\r\n\t\texcept UnicodeDecodeError:\r\n\t\t\treturn\r\n\t\t\r\n\t\tfor connection in self.pending_connection_list:\r\n\t\t\tif (connection.cmd_ready is True):\r\n\t\t\t\tdata = \"\".join(filter(lambda x: ord(x)<127 and ord(x)>31, connection.get_command()))\r\n\t\t\t\tcommand_data = string.split(data, ' ')\r\n\r\n\t\t\t\t# Try and perform the authentification process\r\n\t\t\t\tif (len(command_data) < 3):\r\n\t\t\t\t\tconnection.send('%s\\n' % (self.auth_low_argc))\r\n\t\t\t\telif (len(command_data) >= 3 and string.lower(command_data[0]) == 'connect'):\r\n\t\t\t\t\tname = string.lower(command_data[1])\r\n\t\t\t\t\tpassword = command_data[2]\r\n\t\t\t\t\t\r\n\t\t\t\t\ttarget_player = self.world.find_player(name=name)\r\n\t\t\t\t\tif (target_player is None):\r\n\t\t\t\t\t\tconnection.send('%s\\n' % self.auth_invalid_combination)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tplayer_hash = target_player.hash\r\n\t\t\t\t\t\tif (player_hash == bcrypt.hashpw(password.encode(\"UTF_8\"), player_hash.encode(\"UTF_8\")) == player_hash):\r\n\t\t\t\t\t\t\tconnection.id = target_player.id\r\n\t\t\t\t\t\t\ttarget_player.connection = connection\r\n\r\n\t\t\t\t\t\t\t# Check if our work factors differ\r\n\t\t\t\t\t\t\twork_factor = int(player_hash.split('$')[2])\r\n\t\t\t\t\t\t\tif (work_factor != self.work_factor):\r\n\t\t\t\t\t\t\t\ttarget_player.set_password(password)\r\n\t\t\t\t\t\t\t\tself.logger.info('%s had their hash updated.' % (target_player.display_name))\r\n\r\n\t\t\t\t\t\t\tself.connection_logger.info('Client %s:%u signed in as user %s.' % (connection.address, connection.port, target_player.display_name))\r\n\t\t\t\t\t\t\tself.post_client_authenticated.send(None, sender=target_player)\r\n\t\t\t\t\t\t\tfor player in target_player.location.players:\r\n\t\t\t\t\t\t\t\tif (player is not target_player):\r\n\t\t\t\t\t\t\t\t\tplayer.send(self.auth_connected % target_player.display_name)\r\n\r\n\t\t\t\t\t\t\tfor player in self.established_connection_list:\r\n\t\t\t\t\t\t\t\tif (player.id == connection.id):\r\n\t\t\t\t\t\t\t\t\tplayer.send('%s\\n' % self.auth_replace_connection)\r\n\t\t\t\t\t\t\t\t\tplayer.socket_send()\r\n\t\t\t\t\t\t\t\t\tplayer.deactivate()\r\n\t\t\t\t\t\t\t\t\tplayer.sock.close()\r\n\t\t\t\t\t\t\t\t\tconnection.send('%s\\n' % self.auth_connection_replaced)\r\n\t\t\t\t\t\t\t\t\tself.world.find_room(id=target_player.location_id).broadcast(self.auth_replace_connection_global % target_player.display_name, target_player)\r\n\t\t\t\t\t\t\t\t\tself.established_connection_list.remove(player)\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\tself.pending_connection_list.remove(connection)\t\r\n\t\t\t\t\t\t\tself.established_connection_list.append(connection)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tconnection.send('You have specified an invalid username/password combination.\\n')\r\n\t\t\t\telif (len(command_data) >= 3 and string.lower(command_data[0]) != 'connect'):\r\n\t\t\t\t\tconnection.send('%s\\n' % (self.auth_connect_suggestion))\r\n\t\t\t\t\t#connection.send('You must use the \"connect\" command:\\n')\r\n\t\t\t\t\t#connection.send('connect <username> <password>\\n')\r\n\r\n\t\t# With already connected clients, we'll now deploy the command interface.\r\n\t\tfor index, connection in enumerate(self.established_connection_list):\r\n\t\t\tif (connection.cmd_ready):\r\n\t\t\t\tinput = \"\".join(filter(lambda x: ord(x)<127 and ord(x)>31, connection.get_command()))\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsending_player = 
self.world.find_player(id=connection.id)\r\n\t\t\t\texcept game.exception.DatabaseError:\r\n\t\t\t\t\tconnection.send('A critical error has occurred. Please reconnect later.\\n')\r\n\t\t\t\t\tconnection.socket_send()\r\n\t\t\t\t\tconnection.deactivate()\r\n\t\t\t\t\tconnection.sock.close()\r\n\t\t\t\telse:\r\n\t\t\t\t\tif (sending_player is not None):\r\n\t\t\t\t\t\tsending_player.connection = connection\r\n\t\t\t\t\t\tself.interface.parse_command(sender=sending_player, input=input)\r\n\r\n\t\tself.world_tick.send(None)",
"def onObjectUpdateCached(self, packet):\n\n # ToDo: handle these 2 variables properly\n _RegionHandle = packet['RegionData'][0]['RegionHandle']\n _TimeDilation = packet['RegionData'][0]['TimeDilation']\n\n _request_list = []\n\n for ObjectData_block in packet['ObjectData']:\n\n LocalID = ObjectData_block['ID']\n _CRC = ObjectData_block['CRC']\n _UpdateFlags = ObjectData_block['UpdateFlags']\n\n # Objects.request_object_update() expects a tuple of (_ID, CacheMissType)\n\n # see if we have the object stored already\n _object = self.get_object_from_store(LocalID = LocalID)\n\n if _object == None or _object == []:\n CacheMissType = 1\n else:\n CacheMissType = 0\n\n _request_list.append((LocalID, CacheMissType))\n\n # ask the simulator for updates\n self.request_object_update(self.agent.agent_id, self.agent.session_id, ID_CacheMissType_list = _request_list)",
"def update(self):\n _LOGGER.debug(\"update called.\")\n try:\n # Get our Authentication Token from SEMS Portal API\n _LOGGER.debug(\"SEMS - Getting API token\")\n\n # Prepare Login Headers to retrieve Authentication Token\n login_headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'token': '{\"version\":\"v2.1.0\",\"client\":\"ios\",\"language\":\"en\"}',\n }\n\n # Prepare Login Data to retrieve Authentication Token\n login_data = '{\"account\":\"'+self._config.get(CONF_USERNAME)+'\",\"pwd\":\"'+self._config.get(CONF_PASSWORD)+'\"}'\n\n # Make POST request to retrieve Authentication Token from SEMS API\n login_response = requests.post(_URL, headers=login_headers, data=login_data, timeout=_RequestTimeout)\n\n # Process response as JSON\n jsonResponse = json.loads(login_response.text)\n\n # Get all the details from our response, needed to make the next POST request (the one that really fetches the data)\n requestTimestamp = jsonResponse[\"data\"][\"timestamp\"]\n requestUID = jsonResponse[\"data\"][\"uid\"]\n requestToken = jsonResponse[\"data\"][\"token\"]\n\n _LOGGER.debug(\"SEMS - API Token recieved: \"+ requestToken)\n # Get the status of our SEMS Power Station\n _LOGGER.debug(\"SEMS - Making Power Station Status API Call\")\n\n # Prepare Power Station status Headers\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'token': '{\"version\":\"v2.1.0\",\"client\":\"ios\",\"language\":\"en\",\"timestamp\":\"'+str(requestTimestamp)+'\",\"uid\":\"'+requestUID+'\",\"token\":\"'+requestToken+'\"}',\n }\n\n data = '{\"powerStationId\":\"'+self._config.get(CONF_STATION_ID)+'\"}' \n\n response = requests.post(_PowerStationURL, headers=headers, data=data, timeout=_RequestTimeout)\n\n # Process response as JSON\n jsonResponseFinal = json.loads(response.text)\n\n _LOGGER.debug(\"REST Response Recieved\")\n\n for key, value in jsonResponseFinal[\"data\"][\"inverter\"][0][\"invert_full\"].items():\n if(key is not None and value is not None):\n self._attributes[key] = value\n _LOGGER.debug(\"Updated attribute %s: %s\", key, value)\n except Exception as exception:\n _LOGGER.error(\n \"Unable to fetch data from SEMS. %s\", exception)",
"async def async_update(self):\n try:\n self._data = requests.get(self._build_url(), timeout=10, headers={'accept-encoding': None}).json()\n _LOGGER.debug(\"TOON fetched data = %s\", self._data)\n except (requests.exceptions.RequestException) as error:\n _LOGGER.error(\"Unable to connect to TOON: %s\", error)\n self._data = None",
"def update_service_data_cache(self):\n\n self.services_loaded = False\n thread = threading.Thread(target=self.load_all_services, args=(True,))\n thread.start()\n self.cache_updated = True",
"def update_status(self, server):\r\n\r\n\t\tcards = self.ice.getCardsAlive()\r\n\t\talarm_list = self.ice.getAlarmStatus()\r\n\t\tstatus_list = self.ice.getStatus()\r\n\t\twarning_list = self.ice.getWarnings()\r\n\r\n\t\tdateTimeObj = datetime.now()\r\n\t\ttimestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S)\")\r\n\t\tfor i in range(len(cards)):\r\n\t\t\tjson_body = {'alarm':alarm_list[i], 'status':status_list[i], 'warning':warning_list[i], 'update':timestampStr, 'hostname':self.ip}\r\n\t\t\tserver.update(index='icepap_info', id=self.ip + '_' + str(cards[i]), body={\"doc\":json_body})",
"def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")",
"def __update_data(self):\r\n # loop = asyncio.get_event_loop()\r\n api_base_info_req = self.loop.run_in_executor(None, self.__get_base_info_api)\r\n api_status_req = self.loop.run_in_executor(None, self.__get_status_api)\r\n api_status_res = yield from api_status_req\r\n api_base_info_res = yield from api_base_info_req\r\n\r\n self.__set_base_info_api(api_base_info_res)\r\n self.__set_status_api(api_status_res)",
"def update_state(self):\n\n # Start off assuming no space in the queues and no pointer to a\n # shortest queue.\n self.min_queue = None\n self.has_space_in_a_server_queue = False\n self.queue_size = 0\n self.online_server_count = 0\n\n # Loop through all the servers.\n for server in self.server_list:\n\n # If server is online....\n if server.online is True:\n\n # Increment count of online servers\n self.online_server_count += 1\n\n # If any server has space...\n if len(server.queue) < server.max_queue_size:\n\n # 'Has Space' is True and remains true.\n if self.has_space_in_a_server_queue is False:\n self.has_space_in_a_server_queue = True\n\n # First non-full server we come to.\n if self.min_queue is None:\n self.min_queue = server\n\n # If we already had a non-full queue in hand,\n # compare it to the present one.\n elif len(server.queue) < len(self.min_queue.queue):\n self.min_queue = server\n\n # Increment the count of the parallel server block.\n self.queue_size += len(server.queue)",
"async def update_all_servers(self):\n await self.updater.update_all_servers()",
"def __init__(self, server_object):\n self.server = server_object\n self.old_values = {}",
"def sync(self):\n database = open(self._db,'wb')\n db_content = Settings.version, [s.serialize() for s in self.servers]\n pickle.dump(db_content, database)\n database.close()",
"def get_servers(self):\n json_scheme = self.gen_def_json_scheme('GetServers')\n json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)\n self.json_servers = json_obj\n # if this method is called I assume that i must re-read the data\n # so i reinitialize the vmlist\n self.vmlist = VMList()\n # getting all instanced IP in case the list is empty\n if len(self.iplist) <= 0:\n self.get_ip()\n for elem in dict(json_obj)[\"Value\"]:\n if elem['HypervisorType'] is 4:\n s = Smart(interface=self, sid=elem['ServerId'])\n else:\n s = Pro(interface=self, sid=elem['ServerId'])\n s.vm_name = elem['Name']\n s.cpu_qty = elem['CPUQuantity']\n s.ram_qty = elem['RAMQuantity']\n s.status = elem['ServerStatus']\n s.datacenter_id = elem['DatacenterId']\n s.wcf_baseurl = self.wcf_baseurl\n s.auth = self.auth\n s.hd_qty = elem['HDQuantity']\n s.hd_total_size = elem['HDTotalSize']\n if elem['HypervisorType'] is 4:\n ssd = self.get_server_detail(elem['ServerId'])\n try:\n s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])\n except TypeError:\n s.ip_addr = 'Not retrieved.'\n else:\n s.ip_addr = []\n for ip in self.iplist:\n if ip.serverid == s.sid:\n s.ip_addr = ip\n self.vmlist.append(s)\n return True if json_obj['Success'] is True else False",
"def serverStatusHealth(request):\n\n initRequest(request)\n periodOfAllServWorkRestart = 15 #minutes.\n restartTimeWindow = 5\n\n debug = True\n\n # Here we should load all the servers from the settingsdjangosettings.\n # next is just for tests\n\n data = getCacheEntry(request, \"StatusHealth\")\n\n print (\"serverStatusHealth \", datetime.now(), \" runninghost:\", request.session[\"hostname\"], \" \", data)\n\n if data is None:\n q = collections.deque()\n q.append(\"aipanda100\")\n q.append(\"aipanda105\")\n q.append(\"aipanda106\")\n q.append(\"aipanda115\")\n q.append(\"aipanda116\")\n q.append(\"aipanda107\")\n q.append(\"aipanda108\")\n lastupdate = datetime.now()\n data['q'] = pickle.dumps(q)\n data['lastupdate'] = lastupdate\n setCacheEntry(request, \"StatusHealth\", json.dumps(data, cls=DateEncoder), 60 * 60)\n else:\n data = json.loads(data)\n q = pickle.loads(data['q'])\n lastupdate = datetime.strptime(data['lastupdate'], djangosettings.defaultDatetimeFormat)\n\n # end of test filling\n\n currenthost = q.popleft()\n runninghost = request.session[\"hostname\"]\n\n if (currenthost == runninghost):\n if (datetime.now() - lastupdate) > timedelta(minutes=(periodOfAllServWorkRestart)) and \\\n (datetime.now() - lastupdate) < timedelta(minutes=(periodOfAllServWorkRestart+restartTimeWindow)):\n return HttpResponse(\"Awaiting restart\", content_type='text/html')\n elif (datetime.now() - lastupdate) > timedelta(minutes=(periodOfAllServWorkRestart)) and \\\n (datetime.now() - lastupdate) > timedelta(minutes=(periodOfAllServWorkRestart+restartTimeWindow)):\n data = {}\n q.append(currenthost)\n data['q'] = pickle.dumps(q)\n data['lastupdate'] = datetime.now().strftime(djangosettings.defaultDatetimeFormat)\n setCacheEntry(request, \"StatusHealth\", json.dumps(data, cls=DateEncoder), 60 * 60)\n return HttpResponse(\"Normal operation\", content_type='text/html')\n\n # rows = subprocess.check_output('ps -eo cmd,lstart --sort=start_time | grep httpd', shell=True).split('\\n')[:-2]\n # print \"serverStatusHealth \", datetime.now(), \" rows:\", rows\n #\n # if (currenthost == runninghost) and (datetime.now() - lastupdate) > timedelta(minutes=periodOfAllServWorkRestart):\n #\n # if len(rows) > 0:\n # httpdStartTime = list(datefinder.find_dates(rows[0]))[0]\n # if (datetime.now() - httpdStartTime) < timedelta(minutes=periodOfAllServWorkRestart):\n #\n # print \"serverStatusHealth \", \"httpdStartTime\", httpdStartTime\n #\n # data = {}\n # data['q'] = pickle.dumps(q)\n # data['lastupdate'] = datetime.now().strftime(defaultDatetimeFormat)\n # setCacheEntry(request, \"StatusHealth\", json.dumps(data, cls=DateEncoder), 60 * 60)\n #\n # print \"serverStatusHealth \", \"Normal operation0\"\n # return HttpResponse(\"Normal operation\", content_type='text/html')\n # # We think that wsgi daemon recently restarted and we can change order to the next server\n # # q.put(currenthost)\n # # q. put to cache\n # # lastupdate put to cache\n # # return success\n #\n # # we return failed by default\n # print \"serverStatusHealth \", \"Awaiting restart\"\n # return HttpResponse(\"Awaiting restart\", content_type='text/html')\n #\n # print \"serverStatusHealth \", \"Normal operations1\"\n return HttpResponse(\"Normal operation\", content_type='text/html')",
"def server():",
"def server():",
"def run(self):\r\n self.rpc_server.serve_forever(0.5)"
]
| [
"0.6254933",
"0.5772624",
"0.5732598",
"0.5688727",
"0.5564018",
"0.551672",
"0.5488269",
"0.5466718",
"0.5388758",
"0.5338336",
"0.5329839",
"0.5298457",
"0.5295529",
"0.5287433",
"0.5282714",
"0.52717865",
"0.52582383",
"0.52428687",
"0.52365357",
"0.52337533",
"0.5221746",
"0.51665217",
"0.5160952",
"0.5150852",
"0.5146775",
"0.5143502",
"0.5122671",
"0.5089442",
"0.5089442",
"0.50757253"
]
| 0.637584 | 0 |
Starts a thread responsible for updating local info on Vault servers | def start_server_thread(server):
client = None
name = server['name']
if name in VAULTZ:
client = VAULTZ[name]
else:
client = VAULTZ[name] = get_vault(server)
if name in SERVERZ:
return SERVERZ[name]
server = SERVERZ[name] = fetch_server(client, server)
sthread = threading.Thread(target=update_server,
args=(FINISHED, server,))
VTHREADZ.append(sthread)
sthread.setDaemon(True)
sthread.start()
return server | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _StartStatusUpdateThread(self):\n self._status_update_active = True\n self._status_update_thread = threading.Thread(\n name='Status update', target=self._StatusUpdateThreadMain)\n self._status_update_thread.start()",
"def start(self):\n self.open()\n #t = Thread(target=self._cache_update, args=())\n #t.daemon = True\n #t.start()",
"def start(self):\n self.synchronizer = SyncThread(self.api, self.sync_dir)\n self.synchronizer.start()\n self.tray.on_login()",
"def run(self):\n # get the active node before we start anything...\n self.active_node_ip_port = self.get_active_node()\n if self.active_node_ip_port is None:\n logger.critical(\"ERROR: Could not get active vault node from \"\n \"Consul. Exiting.\")\n raise SystemExit(3)\n logger.warning(\"Initial Vault active node: %s\",\n self.active_node_ip_port)\n site = Site(VaultRedirectorSite(self))\n # setup our HTTP(S) listener\n if self.tls_factory is not None:\n self.listentls(site)\n else:\n self.listentcp(site)\n # setup the update_active_node poll every POLL_INTERVAL seconds\n self.add_update_loop()\n logger.warning('Starting Twisted reactor (event loop)')\n self.run_reactor()",
"def run_forever(self, *args, **kwargs):\n try:\n self.logger.debug('Begin account update')\n\n # get account-updater server ownership\n self.get_ownership_obj = threading.Thread(target = self.msg.get_my_ownership)\n self.get_ownership_obj.setDaemon(True)\n self.get_ownership_obj.start()\n\n self.walker_obj = Walker(self.walker_map, self.__param, self.logger)\n self.walker_obj.setDaemon(True)\n self.walker_obj.start()\n self.logger.info(\"Walker Started\")\n self.reader_obj = Reader(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.reader_obj.setDaemon(True)\n self.reader_obj.start() \n self.logger.info(\"Reader Started\")\n self.account_sweeper = AccountSweep(self.__param, self.logger)\n self.account_sweeper.setDaemon(True)\n self.account_sweeper.start()\n self.logger.info(\"Account Sweeper Started\") \n self.updater_obj = Updater(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.updater_obj.setDaemon(True)\n self.updater_obj.start() \n self.logger.info(\"Updater Started\") \n self.container_sweeper = ContainerSweeper(self.walker_map, \\\n self.reader_map, self.__param, self.logger)\n self.container_sweeper.setDaemon(True)\n self.container_sweeper.start()\n self.logger.info(\"Container Sweeper Started\") \n\n account_updater_server = ThreadedAccountUpdaterServer(\\\n (self.__get_node_ip(gethostname()), \\\n self.__account_updater_port), HttpListener)\n account_updater_server.serve_forever()\n except Exception as ex:\n self.logger.error(\"Exception occured: %s\" % ex)",
"def run(self):\n self.logger.info(\"Plugin '{}': start method called\".format(self.get_fullname()))\n # get additional parameters from files\n self.csrf = self.parse_cookie_file(self.cookiefile)\n \n # Check login-state - if logged off and credentials are availabel login in\n if os.path.isfile(self.cookiefile):\n self.login_state=self.check_login_state()\n self.check_refresh_login()\n if (self.login_state == False and self.credentials != ''):\n try:\n os.remove(self.update_file)\n except:\n pass\n self.check_refresh_login()\n self.login_state=self.check_login_state()\n \n # Collect all devices \n if (self.login_state):\n self.Echos = self.get_devices_by_request()\n else:\n self.Echos = None\n # enable scheduler if Login should be updated automatically\n \n if self.credentials != '':\n self.scheduler_add('check_login', self.check_refresh_login,cycle=300)\n #self.scheduler.add('plugins.alexarc4shng.check_login', self.check_refresh_login,cycle=300,from_smartplugin=True)\n \n if self.ImportPyOTPError:\n logline = str(self.shtime.now())[0:19] + ' no pyOTP installed you can not use MFA'\n else:\n logline = str(self.shtime.now())[0:19] + ' pyOTP installed you can use MFA' \n self._insert_protocoll_entry(logline)\n \n self.alive = True\n \n # if you want to create child threads, do not make them daemon = True!\n # They will not shutdown properly. (It's a python bug)",
"def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)",
"def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()",
"def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()",
"def start(self):\n Thread(target=self.update, args=()).start()\n return self",
"def start(self):\n Thread(target=self.update, args=()).start()\n return self",
"def run(self):\n self.etcd.start()",
"def sync_start(self):",
"def start(self):\r\n t = Thread(target=self.update, args=())\r\n t.daemon = True\r\n t.start()\r\n return self",
"def start(self):\r\n t = Thread(target=self.update, args=())\r\n t.daemon = True\r\n t.start()\r\n return self",
"def on_start(self):\n self.id = str(self.locust).split('object at')[1].strip().replace('>','')\n\n # get data from common config\n self.read_locust_config()\n\n # get data from tempest config\n self.keystone_user = self.get_tempest_config_value('identity','username')\n self.keystone_tenant = self.get_tempest_config_value('identity','tenant_name')\n self.keystone_pw = self.get_tempest_config_value('identity','password')\n self.keystone_uri = self.get_tempest_config_value('identity','uri')\n\n self.output(\"Prepare to be rannsaka'd...\")",
"def run(self):\n self.sync_state()\n self.periodic_resync()\n self.lease_relay.start()\n self.notifications.run_dispatch(self)",
"def run(self):\r\n self.rpc_server.serve_forever(0.5)",
"def startup(self,context):\n master_socket = int(12345)\n self.task_queue = context.InputQueue\n self.result_queue = context.OutputQueue\n manager = Manager()\n self.dict_position = manager.dict()\n self.dict_cycle = manager.dict()\n self.dict_worker_info = manager.dict()\n\n TaskManager.register('get_job_queue',\n callable = lambda:self.task_queue)\n TaskManager.register('get_result_queue',\n callable = lambda:self.result_queue)\n TaskManager.register('get_data',\n callable = lambda:self.dict_position)\n TaskManager.register('get_cycle',\n callable = lambda:self.dict_cycle)\n TaskManager.register('set_worker_info',\n callable = lambda:self.dict_worker_info)\n self.m = TaskManager(address = ('', master_socket),\n authkey = b'secret')\n\n\n thread = Thread(target=self.runServer)\n thread.start()",
"def run(self):\n self.monitor.start()",
"def __init__(self):\n threading.Thread.__init__(self)\n self.shutdown = False\n self.name = 'api'\n self.rpc = rpc(self.name)\n self.rpc.log_info(\"starting rpc api thread\")\n rpchandle = domoslog.rpchandler(self.rpc)\n self.logger = logging.getLogger('api')\n self.logger.addHandler(rpchandle)\n self.logger.setLevel(domosSettings.getLoggingLevel('api'))\n self.rpc.handle(self.listModules, \"getModules\")\n self.rpc.handle(self.listSensors, \"getSensors\")\n self.rpc.handle(self.listSensorArgs, \"getArgs\")\n self.rpc.handle(self.listPrototypes, \"getProtos\")\n self.db = dbhandler()",
"def main():\n # Create the Updater and pass it the token of your bot.\n updater = Updater(\"1276131699:AAHhrtxf6vv-bmFMCnwv2AsjYCn-Ji6zJMs\",\n use_context=True)\n\n dispatcher = updater.dispatcher\n dispatcher.add_handler(CommandHandler(\"login\", login))\n dispatcher.add_handler(CommandHandler(\"lighton\", lighton))\n dispatcher.add_handler(CommandHandler(\"lightoff\", lightoff))\n # get current temperature\n dispatcher.add_handler(CommandHandler(\"temp\", temp))\n # switch on oven\n dispatcher.add_handler(CommandHandler(\"heat\", heat))\n # increase heat by until it flips to the lower limit again\n dispatcher.add_handler(CommandHandler(\"heatmore\", heatmore))\n # switch everything off\n dispatcher.add_handler(CommandHandler(\"off\", off))\n dispatcher.add_handler(CommandHandler(\"on\", on))\n # get status\n dispatcher.add_handler(CommandHandler(\"status\", status))\n # online help\n dispatcher.add_handler(CommandHandler(\"help\", help_command))\n # set reporting interval for temperature reports\n dispatcher.add_handler(CommandHandler(\"set\", add_job_timer))\n # delete reporting interval\n dispatcher.add_handler(CommandHandler(\"unset\", remove_job_timer))\n # on a non-command i.e message - echo the message on Telegram\n dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))\n # Start the Bot\n updater.start_polling()\n # Run the bot until you press Ctrl-C in the shell or\n # the process receives SIGINT, SIGTERM or SIGABRT.\n updater.idle()",
"async def start(self):",
"async def start(self):",
"def _starting(self, sender, **kwargs):\n _log.info('Starting: {}'.format(self.__name__))\n self.vip.heartbeat.start()\n # _log.debug(self.vip.ping('', \"PING ROUTER?\").get(timeout=3))\n #\n q = query.Query(self.core)\n # TODO: Use all addresses for fallback, #114\n self._external_addresses = q.query(b'addresses').get(timeout=30)\n assert self._external_addresses\n self._serverkey = q.query(b'serverkey').get(timeout=30)\n\n _log.debug(\"external addresses are: {}\".format(\n self._external_addresses\n ))\n\n # self._local_address = q.query('local_address').get(timeout=30)\n # _log.debug('Local address is? {}'.format(self._local_address))\n _log.info('Registering jsonrpc and /.* routes with {}'.format(\n MASTER_WEB\n ))\n\n self.vip.rpc.call(MASTER_WEB, 'register_agent_route',\n r'^/jsonrpc.*',\n self.core.identity,\n 'jsonrpc').get(timeout=10)\n\n self.vip.rpc.call(MASTER_WEB, 'register_path_route', VOLTTRON_CENTRAL,\n r'^/.*', self._webroot).get(timeout=20)\n\n self.webaddress = self.vip.rpc.call(\n MASTER_WEB, 'get_bind_web_address').get(timeout=30)\n\n # Remove so that dynamic agents don't inherit the identity.\n os.environ.pop('AGENT_VIP_IDENTITY')",
"def start(self):\n threading.Thread(target=self.serve_forever).start()",
"def _run(self) -> None:\n\n log.debug(\"Volumio 2 Web Service client starting ...\")\n log.debug(f\"Connecting to Volumio 2 Web Service on {self._server}:{self._port}\")\n\n with SocketIO(self._server, self._port) as socketIO:\n log.debug(\"Connected to Volumio 2 Web Service\")\n socketIO.on(\"pushState\", self._on_state_response)\n socketIO.emit(\"GetState\", on_GetState_response)\n\n # Request initial values\n socketIO.emit(\"getState\", \"\")\n\n while not self._stop_event.is_set():\n # rely on Volumio to push states mostly, but request an update\n # at a low frequency to get some lacy update.\n socketIO.wait_for_callbacks(seconds=10)\n socketIO.emit(\"getState\", \"\")",
"def run(updater: Updater):\n logger = getLogger()\n logger.info(\"Starting polling\")\n updater.start_polling()",
"def start( self ):\n\n self.service()",
"def setup():\r\n thread = threading.Thread(target = backup)\r\n thread.start()"
]
| [
"0.64313036",
"0.6330135",
"0.62208235",
"0.6204642",
"0.607325",
"0.5856703",
"0.58469695",
"0.583723",
"0.57949686",
"0.5775334",
"0.5775334",
"0.57446307",
"0.57366014",
"0.57127655",
"0.57127655",
"0.57011217",
"0.56858456",
"0.5678261",
"0.5667453",
"0.5637588",
"0.5628594",
"0.5608399",
"0.56062937",
"0.56062937",
"0.5605687",
"0.5585912",
"0.5572593",
"0.55593246",
"0.5553494",
"0.55407435"
]
| 0.64510936 | 0 |
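The start_server_thread snippet in the row above leans on module-level registries (VAULTZ, SERVERZ, VTHREADZ, FINISHED) and helpers (get_vault, fetch_server, update_server) that the row itself does not define. A minimal, self-contained sketch of the same daemon-thread refresh pattern is shown below; the registry names, the fetch_snapshot stand-in, and the "vault-a" entry are illustrative assumptions, not part of the dataset row.

```python
import threading
import time

SERVERZ = {}                    # name -> latest status snapshot (stand-in registry)
THREADS = []                    # bookkeeping for spawned updater threads
FINISHED = threading.Event()    # set this to ask updaters to stop


def fetch_snapshot(name, url):
    # Stand-in for a real status fetch (e.g. an HTTP call to the Vault node).
    return {"name": name, "url": url, "checked_at": time.time()}


def update_server(stop_event, server, interval=5.0):
    # Periodically refresh the shared snapshot until the stop event is set.
    while not stop_event.wait(interval):
        SERVERZ[server["name"]] = fetch_snapshot(server["name"], server["url"])


def start_server_thread(server):
    # Return the cached snapshot if one exists, otherwise fetch once and start an updater.
    if server["name"] in SERVERZ:
        return SERVERZ[server["name"]]
    SERVERZ[server["name"]] = fetch_snapshot(server["name"], server["url"])
    worker = threading.Thread(target=update_server, args=(FINISHED, server), daemon=True)
    THREADS.append(worker)
    worker.start()
    return SERVERZ[server["name"]]


if __name__ == "__main__":
    # "vault-a" and its URL are hypothetical values, used only for illustration.
    print(start_server_thread({"name": "vault-a", "url": "https://vault-a:8200"}))
    FINISHED.set()  # signal updaters to stop before exiting
```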
Fetches details from Vault, generating a complicated dict representing a server | def fetch_server(client, server):
server_obj = {
"name": server['name'],
"url": server['url'],
"client": client
}
if (server['name'] in SERVERZ) \
and ('cluster_members' in SERVERZ[server['name']]):
clustered = SERVERZ[server['name']]['cluster_members']
server_obj['cluster_members'] = clustered
if 'parent' in server:
server_obj['parent'] = server['parent']
try:
init = client.is_initialized()
except hvac.exceptions.InternalServerError:
return server_obj
except requests.ConnectionError:
return server_obj
except requests.ReadTimeout:
return server_obj
server_obj['init'] = init
if init:
try:
status = client.seal_status
except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):
return server_obj
server_obj['version'] = status['version']
seal = server_obj['sealed'] = status['sealed']
if seal:
server_obj['unseal_required'] = status['t']
server_obj['unseal_progress'] = status['progress']
else:
server_obj['cluster_name'] = status['cluster_name']
server_obj['cluster_id'] = status['cluster_id']
server_obj['rekey'] = False
try:
rekey_obj = client.rekey_status
rekey = server_obj['rekey'] = rekey_obj['started']
if rekey:
server_obj['rekey_backup'] = rekey_obj['backup']
server_obj['rekey_progress'] = rekey_obj['progress']
server_obj['rekey_required'] = rekey_obj['required']
except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):
pass
except hvac.exceptions.InternalServerError as vault_exception:
if vault_exception.message == 'node not active but active '\
'node not found':
pass
server_obj['ha'] = False
server_obj['leader'] = False
try:
leader_obj = client.read('sys/leader')
server_obj['ha'] = leader_obj['ha_enabled']
if leader_obj['ha_enabled']:
server_obj['leader'] = leader_obj['is_self']
except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):
pass
except hvac.exceptions.InternalServerError as e:
if e.message == 'node not active but active node not found':
pass
if not server_obj['ha'] or \
(server_obj['ha'] and server_obj['leader']):
try:
regen_obj = client.read('sys/generate-root/attempt')
server_obj['regenerating'] = regen_obj.get('started', False)
if server_obj['regenerating']:
server_obj['regen_progress'] = regen_obj['progress']
server_obj['regen_required'] = regen_obj['required']
except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):
pass
except hvac.exceptions.InternalServerError as e:
if e.message == 'node not active but ' \
'active node not found':
pass
client = root_client(server_obj)
server_obj['is_root'] = False
if client:
try:
key_obj = client.key_status
server_obj['key_term'] = key_obj['term']
server_obj['is_root'] = True
except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):
pass
except hvac.exceptions.InternalServerError as e:
if e.message == 'node not active but active '\
'node not found':
pass
return server_obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_servers(self):\n json_scheme = self.gen_def_json_scheme('GetServers')\n json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)\n self.json_servers = json_obj\n # if this method is called I assume that i must re-read the data\n # so i reinitialize the vmlist\n self.vmlist = VMList()\n # getting all instanced IP in case the list is empty\n if len(self.iplist) <= 0:\n self.get_ip()\n for elem in dict(json_obj)[\"Value\"]:\n if elem['HypervisorType'] is 4:\n s = Smart(interface=self, sid=elem['ServerId'])\n else:\n s = Pro(interface=self, sid=elem['ServerId'])\n s.vm_name = elem['Name']\n s.cpu_qty = elem['CPUQuantity']\n s.ram_qty = elem['RAMQuantity']\n s.status = elem['ServerStatus']\n s.datacenter_id = elem['DatacenterId']\n s.wcf_baseurl = self.wcf_baseurl\n s.auth = self.auth\n s.hd_qty = elem['HDQuantity']\n s.hd_total_size = elem['HDTotalSize']\n if elem['HypervisorType'] is 4:\n ssd = self.get_server_detail(elem['ServerId'])\n try:\n s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])\n except TypeError:\n s.ip_addr = 'Not retrieved.'\n else:\n s.ip_addr = []\n for ip in self.iplist:\n if ip.serverid == s.sid:\n s.ip_addr = ip\n self.vmlist.append(s)\n return True if json_obj['Success'] is True else False",
"def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer",
"def _GetServers(self) -> List[Dict[str, str]]:\n return [\n {\n \"url\": \"/\",\n \"description\": \"Root path of the GRR API\",\n },\n ]",
"def server_info(ctx):\n data = ctx.obj.get_server_info()\n output_json_data(data)",
"def get_info():\n message = \"GET information about glancesync server\"\n\n logger_api.info(message)\n\n message = '''\n {\n \"id\": \"%s\",\n \"owner\": \"%s\",\n \"status\": \"%s\",\n \"version\": \"%s\",\n \"updated\": \"%s\",\n \"runningfrom\": \"%s\",\n \"href\": \"%s\"\n }\n ''' % (ID, OWNER, STATUS, VERSION, UPDATED, RUNNINGFROM, API_INFO_URL)\n\n resp = make_response(message, httplib.OK)\n resp.headers[SERVER_HEADER] = SERVER\n resp.headers[CONTENT_TYPE] = JSON_TYPE\n\n logger_api.info('Return result: %s', message)\n\n return resp",
"def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()",
"def _untranslate_server_summary_view(server):\n d = {}\n d['id'] = server.id\n d['status'] = server.status\n d['flavor'] = server.flavor['id']\n d['name'] = server.name\n d['image'] = server.image['id']\n d['created'] = server.created\n d['addresses'] = server.addresses\n d['networks'] = server.networks\n d['tenant_id'] = server.tenant_id\n d['user_id'] = server.user_id\n d['security_groups'] = getattr(server, 'security_groups', [])\n\n return d",
"def get_servers_info(self):\n return self.mrr_obj.get('/info/servers')",
"def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n \n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" % reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat",
"def get_server_info(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_GetServerInfo', self.handle))",
"def fp_meta(self):\n for server in self.machines:\n s = self.machines[server]\n print \"%s: %s (%s)\" % (s.id, s.adminPass, s)",
"def get_info(obj):\n res = {}\n res['vserver_group_id'] = obj.vserver_group_id\n if hasattr(obj, 'backend_servers'):\n res['backend_servers'] = obj.backend_servers\n if hasattr(obj, 'vserver_group_name'):\n res['vserver_group_name'] = obj.vserver_group_name\n return res",
"def get_ded_info(server, show=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.get('https://imhsc.imhadmin.net/index.php',\n params={'v': \"Dedicated\", 'selectServer': server})\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # server=0 ip=4 net=5 psc=6 user=11 type=14\n trr = bs.tbody.find_all('tr')\n if len(trr) > 0:\n tsrv = {\n 'hostname': trr[0].find_all('td')[0].string,\n 'ip': trr[0].find_all('td')[2].string,\n 'net': trr[0].find_all('td')[3].string,\n 'psc': trr[0].find_all('td')[4].a.string,\n 'user': trr[0].find_all('td')[9].string,\n 'type': trr[0].find_all('td')[12].string,\n 'status': trr[0].find_all('td')[13].string.strip()\n }\n else:\n tsrv = None\n\n if show:\n if tsrv:\n print(\"[%(hostname)s] IP: %(ip)s (%(net)s) / PSC: %(psc)s / User: %(user)s / Type: %(type)s / Status: %(status)s\" % tsrv)\n else:\n print(\"!! Server '%s' not found\" % (server))\n\n return tsrv",
"def server_info(subresource, server, proxyfilename, baseurl):\n server = HTTPRequests(url=server, localcert=proxyfilename, localkey=proxyfilename, version='HC')\n\n dictresult, status, reason = server.get(baseurl, {'subresource' : subresource})\n\n return dictresult['result'][0]",
"def normalize_server_list_json(server_list):\n myservers = dict()\n global most_fields\n #most_fields = dict()\n #most_fields = {'none': 0} # too lazy to make complex condition\n\n for server in server_list:\n \"\"\"\n Iterate over servers and cherry pick wanted variables/data\n \"\"\"\n myservers[server['name']] = {\n \"name\": server['name'],\n \"flavor_id\": server['flavor']['id'],\n \"flavor_name\": str(server['flavor']['name']),\n \"image_id\": server['image']['id'],\n \"region_name\": server['location']['region_name'],\n \"project_id\": server['location']['project']['id'],\n \"access_ip4\": server['accessIPv4'],\n \"access_ip6\": server['accessIPv6'],\n \"interface_ip4\": server['interface_ip'],\n \"created_at\": server['created_at'],\n \"updated_at\": server['updated'],\n \"terminated_at\": server['terminated_at'],\n \"status\": server['status'],\n \"power_state\": server['power_state'],\n \"provider_ip_zone\": server['RAX-PUBLIC-IP-ZONE-ID:publicIPZoneId'],\n \"host_id\": server['host_id'],\n \"id\": server['id'],\n \"tenant_id\": server['tenant_id']\n }\n\n # @TODO: move this to function add checks when some fields are missing\n if len(server['volumes']) > 0:\n i = 0\n for vol in server['volumes']:\n myservers[server['name']].update({\n \"vol\" + str(i) + '_id': vol['id'],\n \"vol\" + str(i) + '_name': vol['name'],\n \"vol\" + str(i) + '_status': vol['status'],\n \"vol\" + str(i) + '_size': vol['size'],\n \"vol\" + str(i) + '_created_at': vol['created_at'],\n \"vol\" + str(i) + '_updated_at': vol['updated_at'],\n \"vol\" + str(i) + '_type': vol['volume_type'],\n \"vol\" + str(i) + '_device': vol['device'],\n \"vol\" + str(i) + '_storage_node': vol['metadata']['storage-node'],\n #\"vol\" + str(i) + '_storage_mode': vol['metadata']['attached_mode'],\n \"vol\" + str(i) + '_server_id': vol['attachments'][0]['server_id'],\n \"vol\" + str(i) + '_attachment_id': vol['attachments'][0]['attachment_id'],\n \"vol\" + str(i) + '_host_name': vol['attachments'][0]['host_name'],\n \"vol\" + str(i) + '_volume_id': vol['attachments'][0]['volume_id'],\n \"vol\" + str(i) + '_az': vol['availability_zone']\n })\n i = i + 1\n\n else:\n myservers[server['name']].update({\n \"additional_storage\": 0\n })\n\n if int(len(myservers[server['name']])) > int(list(most_fields.values())[-1]):\n most_fields = dict()\n most_fields[server['name']] = int(len(myservers[server['name']]))\n\n # @TODO: add iteration via server['metadata'] when len > 0\n # @TODO: add iteration via server['properties'] when len > 0\n # @TODO: add iteration via server['addresses'] and dynamically add 'networks - Galaxy, public, private ..'\n\n return myservers",
"def detail(self):\n info = self.info()\n info[u'services'] = {}\n for item in self.get_endpoints():\n try:\n info[u'services'][item.service].append(item.endpoint)\n except:\n info[u'services'][item.service] = [item.endpoint]\n return info",
"async def server_info(self, ctx):\n guild = ctx.guild\n id = guild.id\n boost_count = guild.premium_subscription_count\n region = str(guild.region)\n channels = len(guild.channels)\n vc = len(guild.voice_channels)\n text_channels = len(guild.text_channels)\n emoji_limit = guild.emoji_limit\n bitrate = guild.bitrate_limit\n filesize = guild.filesize_limit\n members = str(len(guild.members))\n owner = guild.owner.name\n icon = guild.icon_url\n roles = len(guild.roles)\n banned = len(await guild.bans())\n invites = len(await guild.invites())\n created = str(guild.created_at)\n embed = discord.Embed(\n title=guild.name,\n description=\"Server Info:\",\n color=discord.Colour.purple()\n )\n embed.set_thumbnail(url=icon)\n embed.add_field(name=\"ID:\", value=str(id))\n embed.add_field(name=\"Owner: \", value=owner)\n embed.add_field(name=\"Region: \", value=region)\n embed.add_field(name=\"created at: \", value=created)\n embed.add_field(name=\"Boost count: \", value=boost_count)\n embed.add_field(name=\"Members: \", value=members)\n embed.add_field(name=\"Roles:\", value=str(roles))\n embed.add_field(name=\"Channels:\", value=str(channels))\n embed.add_field(name=\"Text Channels:\", value=str(text_channels))\n embed.add_field(name=\"Voice Channels:\", value=str(vc))\n embed.add_field(name=\"Emoji Limit:\", value=str(emoji_limit))\n embed.add_field(name=\"Max Bitrate:\", value=bitrate)\n embed.add_field(name=\"Max Filesize:\", value=filesize)\n embed.add_field(name=\"Banned Members:\", value=str(banned))\n embed.add_field(name=\"Active Invites:\", value=str(invites))\n await ctx.send(\"\", embed=embed)",
"def Servers(self, server=None):\n if server:\n self.current = server\n return \"successful\"\n\n servers = []\n for x in XbmcServers.select():\n servers.append({'name': x.name, 'id': x.id})\n if len(servers) < 1:\n return\n return {'current': self.current, 'servers': servers}",
"def get(self):\n session = consulate.Consulate()\n\n # track number of servers\n counter = 0\n\n # prepare data for jinja to consume for the jinja template\n data = {\"servers\": {}}\n\n # get a list of available servers in the cluster\n accessible_addresses = [srv[\"Addr\"] for srv in session.agent.members()]\n\n # session.catalog.services() returns a list with a single dictionary\n services = session.catalog.services()\n\n # get all names of services provided by cluster\n service_keys = []\n if isinstance(services, list) and len(services) > 0 and isinstance(services[0], dict):\n service_keys = services[0].keys()\n elif isinstance(services, dict):\n service_keys = services.keys()\n\n for service in service_keys:\n data[\"servers\"][service] = []\n\n # figure out servers with that service\n servers = session.catalog.service(service)\n\n for server in servers:\n ip_address = server[\"Address\"]\n\n # only add server if it's in the current cluster\n if ip_address in accessible_addresses:\n data[\"servers\"][service].append((counter, ip_address))\n counter += 1\n\n return data",
"def get_vm_info(self, tenant, instance_id):\n try:\n req = Request(self.compute_url +\n \"/servers/%s\" % instance_id)\n self._upgrade_to_authenticated_request(req)\n resp = urlopen(req)\n content = resp.read().decode('utf-8')\n encoded = json.loads(content)\n resp.close()\n except URLError as e:\n return {}\n except Exception as e:\n raise Exception(\"Unable to process compute reponse: %s\" % e)\n\n return encoded['server']",
"def kitero():\n return dict(hostname=hostname)",
"def get_servers(self):\n url = '%s/servers/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['servers']\n else:\n LOG.error('Get servers failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def info(client):\n\n return client.get_info()",
"def test_get_servers(self):\n self.assertIsInstance(network.get_servers(), dict)",
"def _get_server(vm_, volumes, nics):\n # Apply component overrides to the size from the cloud profile config\n vm_size = _override_size(vm_)\n\n # Set the server availability zone from the cloud profile config\n availability_zone = config.get_cloud_config_value(\n \"availability_zone\", vm_, __opts__, default=None, search_global=False\n )\n\n # Assign CPU family from the cloud profile config\n cpu_family = config.get_cloud_config_value(\n \"cpu_family\", vm_, __opts__, default=None, search_global=False\n )\n\n # Contruct server object\n return Server(\n name=vm_[\"name\"],\n ram=vm_size[\"ram\"],\n availability_zone=availability_zone,\n cores=vm_size[\"cores\"],\n cpu_family=cpu_family,\n create_volumes=volumes,\n nics=nics,\n )",
"async def list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if len(self.db[server.id]) < 1:\n await self.bot.say(\"No boxes have been created for this server yet, please create some using [p]box create\"\n \" first, thanks\")\n return\n boxes = self.db[server.id].keys()\n await self.bot.say(\"Here are this server's boxes:\\n{}\".format(\"\\n\".join(boxes)))",
"def getServerAndSecret(self):\n\n if self._default_master_server is not None and self._default_master_server != \"AUTOMATIC\":\n return (self._default_master_server,self.getSecret())\n \n sl=RadiusServerList(server_file=self._server_file)\n\n rs=sl.getServer()\n\n return((rs.ip,rs.secret))",
"def get_db_info(self):\n db_info = {}\n db_info[\"Mongo Server Info\"] = self.db_client.server_info()\n return db_info",
"def get_info(self, charger):\n data = {\n \"device_id\": self.uuid,\n \"cmd\": \"get_info\",\n \"token\": charger.token(),\n \"account_token\": self.api_token\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.post(\"{}/box_api_secure\".format(self.BASE_URL),\n data=json.dumps(data),\n headers=headers)\n response_json = response.json()\n return response_json",
"def get():\n\n l2ca_info = caps.l2ca_info()\n\n res = {\n 'cache_size': l2ca_info['cache_size'],\n 'cw_size': l2ca_info['cache_way_size'],\n 'cw_num': l2ca_info['cache_ways_num'],\n 'clos_num': l2ca_info['clos_num'],\n 'cdp_supported': l2ca_info['cdp_supported'],\n 'cdp_enabled': l2ca_info['cdp_enabled']\n }\n return res, 200"
]
| [
"0.63295335",
"0.6017063",
"0.5981714",
"0.5955315",
"0.593461",
"0.5877589",
"0.58129734",
"0.57993543",
"0.57707304",
"0.5751457",
"0.5737767",
"0.5727913",
"0.569141",
"0.5685779",
"0.5670609",
"0.56326616",
"0.5612336",
"0.5601548",
"0.55832845",
"0.5554736",
"0.55362964",
"0.55217755",
"0.55178803",
"0.54982394",
"0.5494391",
"0.54805374",
"0.54739237",
"0.54514873",
"0.5447994",
"0.54432887"
]
| 0.6626059 | 0 |
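The fetch_server snippet in the row above is written against the pre-1.0 hvac client surface (is_initialized(), the seal_status property, client.read('sys/leader')) together with the requests exceptions it catches. A trimmed, self-contained sketch of that probing logic is given below; the probe_vault name and the example URL are assumptions, and newer hvac releases may expose the same calls under client.sys.* instead.

```python
# Illustrative sketch only: probe a single Vault node the way fetch_server does,
# using the pre-1.0 hvac calls that appear in the row above. The URL is a placeholder.
import hvac
import requests


def probe_vault(url, timeout=5):
    status = {"url": url, "reachable": False}
    client = hvac.Client(url=url, timeout=timeout)
    try:
        status["init"] = client.is_initialized()
        status["reachable"] = True
    except (requests.ConnectionError, requests.ReadTimeout,
            hvac.exceptions.InternalServerError):
        return status
    if not status["init"]:
        return status
    try:
        seal = client.seal_status          # dict with 'sealed', 't', 'progress', 'version', ...
        status["sealed"] = seal["sealed"]
        status["version"] = seal["version"]
        if not seal["sealed"]:
            leader = client.read("sys/leader")   # same endpoint fetch_server reads
            status["ha"] = leader["ha_enabled"]
            status["leader"] = leader.get("is_self", False)
    except (hvac.exceptions.VaultDown, requests.exceptions.ReadTimeout):
        pass
    return status


if __name__ == "__main__":
    print(probe_vault("https://vault.example.com:8200"))
```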
Builds up list of data arrays from all raft files (one array per CCD), plus list of segment names. | def get_scandata_raft(inputfile, datadir=''):
raftarrays = []
if os.path.splitext(inputfile)[1] in [".fits", ".fz"]:
# starts with 00 through 22
seglist = ["%d%d" % (i, j) for i in range(3) for j in range(3)]
# when REB2 data is missing
#seglist = ["%d%d" % (i, j) for i in range(2) for j in range(3)]
raftfits = [inputfile.replace("00_", s + '_') for s in seglist]
for f in raftfits:
raftarrays.append(scope.get_scandata_fromfile(f, datadir))
else:
# starts with Reb0 through Reb2
reblist = ["Reb0", "Reb1", "Reb2"]
rebraws = [inputfile.replace("Reb0", s) for s in reblist]
seglist = [r + "-%s" % stripe for r in reblist for stripe in ['A', 'B', 'C'] ]
for f in rebraws:
fullreb = scope.get_scandata_fromfile(f, datadir) # 3D array: 48 channels, lines, columns
#print fullreb.shape
raftarrays.extend([a for a in np.split(fullreb, 3, axis=0)]) # splits REB data into 3 CCDs
return raftarrays, seglist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_data(files):\n data = []\n for fn in files:\n data += parse_data(fn).tolist()\n return np.array(data)",
"def getDatasets(self, dirname, dataset_list):\r\n \r\n files = self.loadDirectory(dirname)\r\n \r\n result = []\r\n for dataset_name in dataset_list:\r\n arr = np.concatenate([f[dataset_name] for f in files])\r\n result.append(arr)\r\n \r\n return result",
"def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list",
"def zarr_concat(input_zarrs: List[str], output_zarr: str, verbose: bool = False) -> None:\n\n output_dataset = ChunkedDataset(output_zarr)\n if os.path.exists(output_zarr):\n output_dataset.open(\"a\")\n else:\n output_dataset.initialize()\n\n for input_zarr in input_zarrs:\n\n input_dataset = ChunkedDataset(input_zarr)\n input_dataset.open()\n\n if verbose:\n print(f\"input scenes size: {input_dataset.scenes.shape[0]}\")\n print(f\"input frames size: {input_dataset.frames.shape[0]}\")\n print(f\"input agents size: {input_dataset.agents.shape[0]}\")\n\n frame_offset = output_dataset.frames.shape[0]\n new_scenes = np.zeros(input_dataset.scenes.shape[0], dtype=SCENE_DTYPE)\n\n for i, scene in enumerate(input_dataset.scenes): # add new scenes to zarr\n scene[\"frame_index_interval\"] = scene[\"frame_index_interval\"] + frame_offset\n new_scenes[i] = scene\n output_dataset.scenes.append(new_scenes)\n\n agent_offset = output_dataset.agents.shape[0]\n new_frames = np.zeros(input_dataset.frames.shape[0], dtype=FRAME_DTYPE)\n for i, frame in enumerate(input_dataset.frames): # add new frames to the zarr\n frame[\"agent_index_interval\"] = frame[\"agent_index_interval\"] + agent_offset\n new_frames[i] = frame\n output_dataset.frames.append(new_frames)\n\n output_dataset.agents.append(input_dataset.agents) # add new agents to the zarr\n\n if verbose:\n print(f\"output scenes size: {output_dataset.scenes.shape[0]}\")\n print(f\"output frames size: {output_dataset.frames.shape[0]}\")\n print(f\"output agents size: {output_dataset.agents.shape[0]}\")",
"def readFullRelaxFiles(folder_path):\n\n run_arr = []\n Nrun_arr = []\n dod_arr = []\n crate_arr = []\n count=0\n\n # find number of files that starts with run\n # (this is the data file we want to read)\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun\"):\n count+=1\n\n # order the data files by run number, so we get descending crates\n Nrun=1\n for i in range(count+5):\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun_\"+str(Nrun)+\"-\"):\n run_arr.append(file)\n dod = re.search('dod=(.*).txt', file).group(1)\n crate = re.search('Crate=(.*)_',file).group(1)\n Nrun_arr.append(np.round(int(Nrun),decimals=0))\n dod_arr.append(float(dod))\n crate_arr.append(float(crate))\n Nrun+=1\n print(len(run_arr))\n\n return run_arr, Nrun_arr, dod_arr, crate_arr",
"def create_subarrays(input_hdulist, subarrays):\n subarray_files = []\n #\n # Extract the data\n #\n for subarray in subarrays:\n outputfilename = create_single_subarray(input_hdulist, subarray)\n subarray_files.append(outputfilename)\n #\n # All done, now we return the names of the subarray files\n #\n return subarray_files",
"def make_photon_arrays(path, numevents):\n xcoords = []\n zcoords = []\n \n nsipmarrays = []\n nabsarrays = []\n \n for filename in os.listdir(path):\n\n photondata = np.loadtxt(path+'/'+filename,delimiter=',',usecols=[1,4])\n\n coords = filename[0:8]\n\n arraylen = len(photondata.flatten('F'))\n \n nsipmphotons = photondata.flatten('F')[numevents:arraylen]\n #print(len(nsipmphotons))\n nabsphotons = photondata.flatten('F')[0:numevents] \n \n nsipmarrays.append(nsipmphotons)\n nabsarrays.append(nabsphotons)\n \n x = re.findall('(-[0-9]+)x',coords) \n \n if bool(x) == False:\n x = re.findall('([0-9]+)x', coords)\n \n z = re.findall('(-[0-9]+)z',coords) \n\n if bool(z) == False:\n z = re.findall('([0-9]+)z',coords)\n\n xcoords.append(x[0])\n zcoords.append(z[0])\n \n xcoords = np.array(xcoords).astype(np.float)\n zcoords = np.array(zcoords).astype(np.float)\n \n return xcoords, zcoords, nsipmarrays, nabsarrays",
"def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals",
"def build_list(self, directory):\n\n list_of_data = []\n # parse the labels\n for i, label in enumerate(self.label_list):\n dir = self.root_dir + label +'/'\n print(i, dir)\n folders = os.listdir(dir)\n # for each label, parse all the videos associated to it\n for folder in folders:\n folder_path = dir + folder + '/'\n list_of_data.append((i, folder_path))\n return list_of_data",
"def loaddata(filelist, path='', band=1):\n # 2008-07-25 16:26 IJC: Created\n # 2010-01-20 12:58 IJC: Made function work for IRTF low-res data.\n # Replaced 'warn' command with 'print'.\n # 2011-04-08 11:42 IJC: Updated; moved inside analysis.py.\n # 2011-04-12 09:57 IJC: Fixed misnamed imports\n\n import pyfits\n\n data = array([])\n\n if filelist.__class__==str or isinstance(filelist,np.string_):\n filelist = ns.file2list(filelist)\n elif filelist.__class__<>list:\n print('Input to loaddata must be a python list or string')\n return data\n\n num = len(filelist)\n\n # Load one file just to get the dimensions right.\n irspec = pyfits.getdata(filelist[0])\n\n ii = 0\n for element in filelist:\n irspec = pyfits.getdata(element)\n\n if ii==0:\n irsh = irspec.shape\n data = zeros((num,)+irsh[1::], float)\n \n if len(irsh)>2:\n for jj in range(irsh[1]):\n data[ii, jj, :] = irspec[band-1, jj, :]\n else:\n data[ii,:] = irspec[band-1,:]\n\n ii=ii+1\n\n return data",
"def _load_fmri(fmri_filenames):\n a = np.array([pd.read_csv(subject_filename,\n header=None).values\n for subject_filename in fmri_filenames])\n Z = np.zeros((500, a[0].shape[1]))\n\n for i in range(len(a)):\n Z[:a[i].shape[0], :] = a[i]\n a[i] = Z[:TRUNC, ]\n Z = 0*Z\n\n return a",
"def assemble(self, dt_range=None):\n if dt_range is not None:\n self.dt_list = trace_source.time_list(dt_range[0],\n dt_range[1],\n self.config['time']['step'])\n\n # only for the testcase\n traj_dir = self.config['partposit_dir']\n days_avail = os.listdir(traj_dir)\n # filter only for the trajectory files with tdump extension\n days_avail = [f for f in days_avail if len(f) == 11]\n print(days_avail)\n folders = [f for f in days_avail if datetime.datetime.strptime(f, \"%Y%m%d_%H\") in self.dt_list]\n\n assert len(folders) > 0, 'no folders with flexpart partposit data'\n\n # the defaultdict is used here to sort the files by datetime within a dictionary\n # filtered_files = defaultdict(list)\n # for f in files:\n # # regex the yyyymmdd-hh timestamp in the filename\n # dt = datetime.datetime.strptime(re.search('([0-9]{8})-([0-9]){2}', f).group(0), '%Y%m%d-%H')\n # height = float(re.search('([0-9]{3,6})(?=_0[0-9-]{1,4}.tdump)', f).group(0))\n # #print(f, dt, height)\n # if dt >= self.dt_list[0] and dt <= self.dt_list[-1]:\n # filtered_files[dt].append((f,height))\n\n # here an empty dict is generated with a zero containing array\n self.stat2d_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list))))\n\n self.statls_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list), 7)))\n\n self.raw_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list),\n abs(self.config['time']['tr_duration'])+1)))\n\n # TODO make more than 7 geo names possible\n ng = trace_source.land_sfc.named_geography(self.config['geonames'])\n self.geo_names = ng.geo_names\n no_geo_names = len(list(self.geo_names.keys()))\n self.statgn_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n no_geo_names)))\n\n\n self.lat_names = {0: '<-60', 1: '-60..-30', 2:'-30..0', 3: '0..30', 4: '30..60', 5: '>60'}\n self.statlat_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n len(list(self.lat_names.keys())))))\n\n\n ls = trace_source.land_sfc.land_sfc()\n self.ls_categories = ls.categories\n\n\n for it, dt in enumerate(self.dt_list[:]):\n print('trajectories eding at ', dt)\n files_for_time = os.listdir(traj_dir + dt.strftime(\"%Y%m%d_%H\"))\n files_for_time = sorted([f for f in files_for_time if \"partposit_\" in f])\n folder = traj_dir + dt.strftime(\"%Y%m%d_%H\") + \"/\"\n print('files_for_time ', files_for_time)\n\n print('heights ', len(self.height_list), self.height_list)\n\n flex_stat = [flex_statistics(self.config, ls=ls, ng=ng) for h in self.height_list]\n traj_meta = read_flexpart_traj_meta(folder + \"trajectories.txt\")\n\n self.no_part.append(traj_meta['releases_meta'][1]['no_particles'])\n self.time_res.append(10*24/len(files_for_time))\n\n # different structure than hysplit\n # 1. loop through the ending times of the current day\n # 2. load partposit for a specified time\n # 3. 
loop through heights\n\n for f in files_for_time:\n print('files_for_time ', f)\n part_pos = read_partpositions(folder + f, 1, ctable=True)\n part_pos = np.array(part_pos)\n\n for ih, h in enumerate(self.height_list):\n #print(\"at \", ih, h)\n this_population = np.where(part_pos[:,0] == ih+1)[0]\n #release_sel = np.array([list(p) for p in part_pos if p[0]==ih+1])\n release_sel = part_pos[this_population, :]\n #assert np.all(release_sel == other_release)\n meta = traj_meta['releases_meta'][ih+1]\n #print(meta)\n assert np.mean(meta['heights']) == h, f\"{meta['heights']} {h} do not fit\"\n flex_stat[ih].add_partposits_gn(release_sel)\n\n flex_stat[ih].add_partposits_ls(release_sel)\n flex_stat[ih].add_partposits_thres(release_sel)\n\n # now assemble the statistics for all heights\n for ih, h in enumerate(self.height_list): \n flex_stat[ih].calc_gn_stat()\n for k in list(flex_stat[ih].stat_gn.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_gn[k].no_below\n print('stat gn ', h, k, flex_stat[ih].stat_gn[k])\n self.statgn_dict[k][it, ih] = list(flex_stat[ih].stat_gn[k].counter.values())\n\n flex_stat[ih].calc_ls_stat()\n for k in list(flex_stat[ih].stat_ls.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_ls[k].no_below\n print('stat ls ', h, k, flex_stat[ih].stat_ls[k])\n self.statls_dict[k][it, ih] = list(flex_stat[ih].stat_ls[k].counter.values())\n\n flex_stat[ih].calc_thres_stat()\n for k in list(flex_stat[ih].stat_lat.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_lat[k].no_below\n print('stat_lat ', h, k, flex_stat[ih].stat_lat[k])\n self.statlat_dict[k][it, ih] = list(flex_stat[ih].stat_lat[k].counter.values())\n\n\n # #assert len(f_list) > 1\n # for ih, f in enumerate(f_list):\n # print(it, ih, f[1], dt)\n # traj = trajectory(self.config)\n # traj.load_file(traj_dir+f[0], silent=True)\n # savepath = '{}/{}'.format(self.config['plot_dir'], dt.strftime('%Y%m%d'))\n\n\n # if \"timeinterval\" in self.config['plotmap']:\n # timeinterval = self.config['plotmap']['timeinterval']\n # else:\n # timeinterval = 12\n # if \"heights\" in self.config['plotmap']:\n # heightlist = self.config['plotmap']['heights']\n # else:\n # heightlist = [1500.0, 3000.0, 4500.0]\n # #if f[1] == 3000.0 and dt.hour % 12 == 0:\n # if f[1] in heightlist and dt.hour % timeinterval == 0:\n # print(\"plotting \", f[1], dt.hour)\n # plot_trajectories_ens(traj, savepath, ls=ls, config=self.config)\n # #continue\n\n # traj.evaluate(silent=True)\n # traj.add_land_sfc(ls, silent=True)\n # traj.add_ensemble_land_sfc(ls)\n # traj.add_ensemble_geo_names(ng)\n # #traj.add_area_land_sfc('md', ls, silent=True)\n # #traj.add_area_land_sfc(2000, ls, silent=True)\n\n # #print(\"at step\", it, dt, ih, f)\n # #print('keys ', traj.statistics.keys())\n # # now the empty dict is filled with the keys (and values) of the statistics dict from traj\n # for k in list(traj.statistics.keys()):\n # self.stat2d_dict[k][it, ih] = traj.statistics[k]\n # # subset of trajectory data to collect\n # param_collect = ['latitude', 'longitude', 'height', \"PRESSURE\", \"AIR_TEMP\",\n # \"RAINFALL\", \"RELHUMID\", \"TERR_MSL\", 'age']\n # if 'land_sfc_category' in list(traj.data.keys()):\n # param_collect.append('land_sfc_category')\n # for k in param_collect:\n # #self.raw_dict[k][it, ih, :traj.data[1][k].shape[0]] = traj.data[1][k]\n # self.raw_dict[k][it, ih, :] = traj.data[1][k]\n # #self.raw_dict[k][it, ih, traj.data[1][k].shape[0]:] = -999.\n\n # for k in list(traj.stat_ls.keys()):\n # 
self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_ls[k].no_below\n # print('stat ls ', k, traj.stat_ls[k])\n # self.statls_dict[k][it, ih] = list(traj.stat_ls[k].counter.values())\n\n # for k in list(traj.stat_gn.keys()):\n # self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_gn[k].no_below\n # print('stat gn ', k, traj.stat_gn[k])\n # self.statgn_dict[k][it, ih] = list(traj.stat_gn[k].counter.values())\n\n # trying to free memory\n del ls\n del ng",
"def read_datasets(itype, att, nfiles=16):\n # Output array.\n data = []\n # Loop over each file and extract the data.\n for i in range(nfiles):\n f = h5.File(fname+'.%i.hdf5'%i, 'r')\n tmp = f['PartType%i/%s'%(itype, att)][...]\n data.append(tmp)\n\n f.close()\n # Combine to a single array.\n if len(tmp.shape) > 1:\n data = np.vstack(data)\n else:\n data = np.concatenate(data)\n return data",
"def eeg_loaddata(filedir,filemask):\t\n\tfiles = glob.glob1(filedir,filemask)\n\tprint \"loading %d files\" %len(files)\n\teeg,tim,nchan,ntpts = eeg_readavr(op.join(filedir,files[0])) #just to initialize the next line\n\tdata = np.zeros((len(files),eeg.shape[0],eeg.shape[1]))\n\tfor i in range(len(files)):\n\t\teeg,tim,nchan,ntpts = eeg_readavr(op.join(filedir,files[i]))\n\t\tdata[i,:,0:ntpts]=eeg[:,0:ntpts]\n\t\tcnt = 100.0*i/len(files)\t\n\t\tsys.stdout.write(\"progress: \\r%d%%\" %cnt)\n\n return data,tim,nchan,files",
"def combine1(prefix):\n files = glob.glob(prefix + '/*.npz')\n files = [numpy.load(f) for f in files]\n edges = files[0]['edges']\n r = []\n m = []\n e = []\n r = files[0]['xi'][0, 0]\n l = []\n for i in range(len(edges) - 1):\n xi = [f['xi'][i, 1, :] for f in files]\n l.append(\n (r, \n numpy.mean(xi, axis=0),\n numpy.std(xi, axis=0) * len(xi) ** -0.5))\n return numpy.array(l)",
"def read_data(dirs, idx_90d, idx_0d, idx_45d, idx_m45d, img_size):\n raw_data_90d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_data_0d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_data_45d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_data_m45d = np.zeros(shape=(img_size, img_size, 3, 9, len(dirs)), dtype=np.float32)\n raw_label = np.zeros(shape=(img_size, img_size, len(dirs)), dtype=np.float32)\n\n i_scence = 0\n for dir in dirs:\n print(\"loading...\", dir)\n for idx in range(len(idx_0d)):\n raw_data_90d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_90d[idx])))\n raw_data_0d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_0d[idx])))\n raw_data_45d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_45d[idx])))\n raw_data_m45d[:, :, :, idx, i_scence] = np.float32(\n imageio.imread(dir + '/input_Cam0%02d.png' % (idx_m45d[idx])))\n raw_label[:, :, i_scence] = np.array(read_pfm(dir + '/gt_disp_lowres.pfm'), dtype=np.float32)\n i_scence += 1\n return raw_data_90d, raw_data_0d, raw_data_45d, raw_data_m45d, raw_label",
"def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the y component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[2])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals",
"def CollectDatasets(redirector_str):\n \n \n # uploadDir = 'srv/' for lpcjobqueue shell or TTbarAllHadUproot/ for coffea casa and WinterFell\n \n if 'cmsxrootd' in redirector_str:\n uploadDir = 'srv'\n else:\n uploadDir = 'TTbarAllHadUproot'\n \n uploadDir = ''\n \n filedir = 'nanoAODv9Files/'\n Years = ['UL16', 'UL17', 'UL18']\n VFP = ['preVFP', 'postVFP'] # preVFP unavailable in Winterfell for the moment\n # VFP = ['postVFP'] # Only for simple test in WinterFell\n filesets = {} # To be filled and returned by this function\n \n # ---- Before concatenation with +=, lists should be declard ---- # \n \n for y in Years:\n if '16' in y:\n for v in VFP:\n filesets[y+v+'_QCD'] = []\n filesets[y+v+'_TTbar_700_1000'] = []\n filesets[y+v+'_TTbar_1000_Inf'] = []\n # ---- JetHT and SingleMu ---- #\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'preVFP_JetHT'+l+'_Data'] = []\n filesets[y+'preVFP_SingleMu'+l+'_Data'] = []\n for l in ['', 'F', 'G', 'H']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n \n elif '17' in y:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n else:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'A', 'B', 'C', 'D']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n # ---- Loop through years and VFP status, filling the filesets dictionary with the MC file locations from corresponding txt files ---- #\n \n for y in Years:\n if '16' in y:\n for v in VFP:\n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n \n # ---- TTbar ---- #\n ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar700to1000filename) as f:\n ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar1000toInffilename) as f:\n ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n if 'pre' in v:\n if 'Run2016' in filename: #preVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016 \n elif 'post' in v:\n if 'Run2016' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' not in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016\n \n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if 
(\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n# # ---- RS KK Gluon ---- #\n# ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n# ulRSGluonfiles=[]\n# l=0\n# for i in range(1000, 5500, 500):\n# with open(ulRSGluonfilename) as f:\n# ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n# filesets[y+v+'_RSGluon'+str(i)] += ulRSGluonfiles[l]\n# l += 1\n \n else: # UL17 and UL18\n v = VFP[1] # No preVFP after 2016 Run vertex problem was fixed\n \n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n\n# # ---- TTbar ---- #\n# ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar700to1000filename) as f:\n# ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar1000toInffilename) as f:\n# ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n# filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist: \n if 'Run2017' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2017\n elif 'Run2018' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2018 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2018\n\n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n # ---- RS KK Gluon ---- #\n ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n ulRSGluonfiles=[]\n l=0\n for i in range(1000, 5500, 500):\n with open(ulRSGluonfilename) as f:\n ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_RSGluon'+str(i)] = ulRSGluonfiles[l]\n l += 1\n \n \n # ---- JetHT Eras---- #\n \n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTC_Data'] += jetdatafiles2016c\n elif 
'Run2016D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'JetHT/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'JetHT/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_JetHTF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_JetHTF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'JetHT/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'JetHT/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'JetHT/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in 
d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTD_Data'] += jetdatafiles2018d\n \n\n \n # ---- Single Muon ---- #\n datafilelist = os.listdir(filedir + 'SingleMu/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'SingleMu/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'SingleMu/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_SingleMuF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_SingleMuF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'SingleMu/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'SingleMu/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'SingleMu/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'SingleMu/' + filename) as a:\n 
jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuD_Data'] += jetdatafiles2018d\n \n \n # print(filesets['UL16postVFP_JetHT_Data'])\n # print('==========================================================================================================')\n # print(filesets['UL16postVFP_TTbar'])\n \n return filesets",
"def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)",
"def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()",
"def init_data(partitions_file):\n mapping = []\n\n drive_size = None\n for line in partitions_file:\n if drive_size is None:\n drive_size = parse_drive_size(line.rstrip())\n else:\n partitions_list = parse_partitions(line.rstrip())\n mapping.append((drive_size, partitions_list))\n drive_size = None\n\n return mapping",
"def read_tracers(datadir = 'data/', fileName = 'tracers.dat', zlim = [], head_size = 3, post = False):\n \n class data_struct:\n def __init__(self):\n self.xi = []\n self.yi = []\n self.xf = []\n self.yf = []\n self.zf = []\n self.l = []\n self.q = []\n\n data = []\n data = data_struct()\n\n # compute the offset in order to skip Fortran's header byte\n if (post):\n head_size = 0\n off = 2\n if (head_size == 3):\n off = 2\n if (head_size == 5):\n off = 3\n\n # read the cpu structure\n dim = pc.read_dim(datadir = datadir)\n if (dim.nprocz > 1):\n print(\": number of cores in z-direction > 1\")\n return -1\n\n # read the parameters\n params = pc.read_param(datadir = datadir, quiet = True)\n\n # read the grid\n grid = pc.read_grid(datadir = datadir, quiet = True)\n\n # determine the file structure\n if (post):\n n_proc = 1\n tracer_file = open(datadir+fileName, 'rb')\n trace_sub = struct.unpack(\"f\", tracer_file.read(4))[0]\n tracer_file.close()\n n_times = int((os.path.getsize(datadir+fileName)-4)/(4*7*int(dim.nx*trace_sub)*int(dim.ny*trace_sub)))\n # sub sampling of the tracers\n if (not(post)):\n n_proc = dim.nprocx*dim.nprocy\n trace_sub = params.trace_sub\n n_times = int(os.path.getsize(datadir+'proc0/'+fileName)/(4*(head_size + 7*np.floor(dim.nx*trace_sub)*np.floor(dim.ny*trace_sub)/dim.nprocx/dim.nprocy)))\n\n # prepare the output arrays\n tracers = np.zeros((int(dim.nx*trace_sub), int(dim.ny*trace_sub), n_times, 7))\n mapping = np.zeros((int(dim.nx*trace_sub), int(dim.ny*trace_sub), n_times, 3))\n\n # temporary arrays for one core\n if (post):\n tracers_core = tracers\n mapping_core = mapping\n else:\n tracers_core = np.zeros((int(int(dim.nx*trace_sub)/dim.nprocx), int(int(dim.ny*trace_sub)/dim.nprocy), n_times, 7))\n mapping_core = np.zeros((int(int(dim.nx*trace_sub)/dim.nprocx), int(np.floor(dim.ny*trace_sub)/dim.nprocy), n_times, 3))\n\n # set the upper z-limit to the domain boundary\n if zlim == []:\n zlim = grid.z[-dim.nghostz-1]\n\n # read the data from all cores\n for i in range(n_proc):\n # read the cpu structure\n if (post):\n dim_core = pc.read_dim(datadir = datadir, proc = -1)\n dim_core.ipx = 0\n dim_core.ipy = 0\n else:\n dim_core = pc.read_dim(datadir = datadir, proc = i)\n stride = int(dim_core.nx*trace_sub)*int(dim_core.ny*trace_sub)\n llen = head_size + 7*stride + post\n\n if (post):\n tracer_file = open(datadir+fileName, 'rb')\n else:\n tracer_file = open(datadir+'proc{0}/'.format(i)+fileName, 'rb')\n tmp = array.array('f')\n tmp.read(tracer_file, int((head_size + post + 7*int(dim_core.nx*trace_sub)*int(dim_core.ny*trace_sub))*n_times)+post)\n tracer_file.close()\n\n t = []\n\n for j in range(n_times):\n t.append(tmp[off-1+j*llen])\n data.xi = tmp[off+j*llen : off+1*stride+j*llen]\n data.yi = tmp[off+1*stride+j*llen : off+2*stride+j*llen]\n data.xf = tmp[off+2*stride+j*llen : off+3*stride+j*llen]\n data.yf = tmp[off+3*stride+j*llen : off+4*stride+j*llen]\n data.zf = tmp[off+4*stride+j*llen : off+5*stride+j*llen]\n data.l = tmp[off+5*stride+j*llen : off+6*stride+j*llen]\n data.q = tmp[off+6*stride+j*llen : off+7*stride+j*llen]\n\n # Squeeze the data into 2d array. 
This make the visualization much faster.\n for l in range(len(data.xi)):\n tracers_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = \\\n [data.xi[l], data.yi[l], data.xf[l], data.yf[l], data.zf[l], data.l[l], data.q[l]]\n if data.zf[l] >= zlim:\n if (data.xi[l] - data.xf[l]) > 0:\n if (data.yi[l] - data.yf[l]) > 0:\n mapping_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = [0,1,0]\n else:\n mapping_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = [1,1,0]\n else:\n if (data.yi[l] - data.yf[l]) > 0:\n mapping_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = [0,0,1]\n else:\n mapping_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = [1,0,0]\n else:\n mapping_core[l%(int(dim_core.nx*trace_sub)),int(l/(int(dim_core.nx*trace_sub))),j,:] = [1,1,1]\n\n # copy single core data into total data arrays\n if (not(post)):\n tracers[np.round(dim_core.ipx*int(dim_core.nx*trace_sub)):np.round((dim_core.ipx+1)*np.floor(dim_core.nx*trace_sub)), \\\n np.round(dim_core.ipy*int(dim_core.ny*trace_sub)):np.round((dim_core.ipy+1)*np.floor(dim_core.ny*trace_sub)),j,:] = \\\n tracers_core[:,:,j,:]\n mapping[np.round(dim_core.ipx*int(dim_core.nx*trace_sub)):np.round((dim_core.ipx+1)*np.floor(dim_core.nx*trace_sub)), \\\n np.round(dim_core.ipy*int(dim_core.ny*trace_sub)):np.round((dim_core.ipy+1)*np.floor(dim_core.ny*trace_sub)),j,:] = \\\n mapping_core[:,:,j,:]\n\n # swap axes for post evaluation\n tracers = tracers.swapaxes(0, 1)\n mapping = mapping.swapaxes(0, 1)\n\n return tracers, mapping, t",
"def readNSRelaxFiles(folder_path):\n\n run_arr = []\n Nrun_arr = []\n dod_arr = []\n crate_arr = []\n count=0\n\n # find number of files that starts with run\n # (this is the data file we want to read)\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun\"):\n count+=1\n\n # order the data files by run number, so we get descending crates\n Nrun=1\n for i in range(count+5):\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun_\"+str(Nrun)+\"_\"):\n run_arr.append(file)\n dod = re.search('dod=(.*).txt', file).group(1)\n crate = re.search('Crate=(.*)_',file).group(1)\n Nrun_arr.append(np.round(int(Nrun),decimals=0))\n dod_arr.append(float(dod))\n crate_arr.append(float(crate))\n Nrun+=1\n print(len(run_arr))\n\n return run_arr, Nrun_arr, dod_arr, crate_arr",
"def main():\n onlyfiles = [f for f in listdir(RAWDATA_PATH) if isfile(join(RAWDATA_PATH, f))]\n for file in onlyfiles:\n create_RCSB_fastas(file)",
"def split_batches(filenames):\n by_time = {}\n for path_name in filenames:\n file_name = path.basename(path_name)\n parsed_fn = parse_agdc_fn(file_name)\n dt = parsed_fn['datetime']\n by_time.setdefault(dt, []).append((path_name, parsed_fn))\n\n rv = list(by_time.values())\n\n for group in rv:\n # Will raise exception if group is non-homogeneous\n check_sane(parsed for _, parsed in group)\n\n return rv",
"def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder and piece\n\t\t\tself.addNode(self.tree,data.path(),data)",
"def _buildindex( self ):\n try:\n import ROOT as rt\n except:\n print \"Could not load ROOT\"\n sys.exit(-1)\n \n # sigh. this is a mess\n self.producers = [] # all producer names found in ROOT files\n self.datatypes = [] # all data types\n self.flavors = [] # flavor = hash of string listing set of trees found in a given file\n self.flavor_def = {} # map from flavor to list of tree names\n self.rawdigits_entrymap = {} # only used if file type is raw digits. maps rse to (position,wfms) in data tree\n self.rawdigits_tpcindex = {}\n flavor_eventset = {}\n eventsets = []\n events_to_files = {}\n events_to_flavors = {}\n\n # this loop is going into each file in our list and\n # - taking the list of trees in the file and making a has out of their names\n # - this hash is used to define the 'flavor' of the file\n # - we also make a list of events in the tree, labeling each entry with (run,subrun,event) ID\n # - we keep track of such list of entries and group files (and flavors) with the same event list\n # - determine filetype: LArCV or LArLite\n self.filetype = None\n for f in self.larlitefilelist:\n r = rt.TFile(f)\n nfkeys = r.GetListOfKeys().GetEntries()\n\n # now here we parse the type of objects in the ROOT file\n # we are looking to determine three file types supported by pylard\n # (1) larlite (2) larcv (3) rawdigitreader\n trees = []\n for i in range(nfkeys):\n keyname = r.GetListOfKeys().At(i).GetName()\n if keyname==\"larlite_id_tree\":\n found_id_tree = True\n elif \"_tree\" in keyname:\n producer = keyname.split(\"_\")[1]\n dtype = keyname.split(\"_\")[0]\n if producer not in self.producers:\n self.producers.append( producer )\n if dtype not in self.datatypes:\n self.datatypes.append( dtype )\n elif \"rawdigitwriter\" in keyname:\n trees.append( \"rawdigitwriter/RawDigits\" )\n trees.append( \"rawdigitwriter/OpDetWaveforms\" )\n trees.append( \"rawdigitwriter/IndexRawDigits\" )\n trees.append( \"rawdigitwriter/IndexOpDetWfms\" )\n if keyname not in trees:\n trees.append(keyname)\n hashstr = \"\"\n trees.sort()\n for keyname in trees:\n hashstr += keyname +\";\"\n\n # determine filetype from type of keys we see\n is_supported_rootfile = False\n idtreename = None\n if \"larlite_id_tree\" in trees:\n thisfiletype = \"LARLITE\"\n is_supported_rootfile = True\n if \"image2d\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"partroi\" in self.datatypes:\n thisfiletype = \"LARCV\"\n is_supported_rootfile = True\n if \"rawdigitwriter/OpDetWaveforms\" in trees:\n thisfiletype = \"RAWDIGITS\"\n is_supported_rootfile = True\n if not is_supported_rootfile:\n continue\n\n if self.filetype is not None and self.filetype!=thisfiletype:\n print \"Error in parsing filelist: Cannot mix filetypes (LArCV/LArLite/RawDigitTree)\"\n return\n elif self.filetype is None:\n self.filetype = thisfiletype\n \n # now we determine the idtree to use\n if self.filetype==\"LARLITE\":\n idtreename = \"larlite_id_tree\"\n elif self.filetype==\"LARCV\":\n if self.loaded_larcv == False:\n s = time.time()\n import larcv as larcv\n print \"LOADING LARCV: \",time.time()-s,\"secs\"\n self.loaded_larcv = True\n for treename in trees:\n if \"image2d\" in treename:\n if idtreename is None:\n idtreename = treename\n else:\n pass # we only use this if we have to\n if \"partroi\" in treename:\n idtreename = treename # we prefer to use this tree for speed\n break\n elif self.filetype==\"RAWDIGITS\":\n idtreename = \"rawdigitwriter/IndexOpDetWfms\"\n\n if idtreename is None:\n print \"Error: Could 
not setup a proper ID tree for this file\"\n continue\n\n # now we parse the tree contents. define a flavor for it based on all the trees\n # we also get the (run,subrun,event) id for the event\n m = hashlib.md5()\n m.update(hashstr)\n flavor = m.digest()\n if flavor not in self.flavors:\n self.flavors.append( flavor )\n flavor_eventset[flavor] = []\n self.flavor_def[flavor] = hashstr\n if self.filetype==\"LARLITE\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"LARCV\":\n idtree = r.Get(idtreename)\n elif self.filetype==\"RAWDIGITS\":\n idtree = r.Get(idtreename)\n \n eventset = [] # list of events\n for n in range(idtree.GetEntries()):\n idtree.GetEntry(n)\n if self.filetype==\"LARLITE\":\n rse = ( idtree._run_id, idtree._subrun_id, idtree._event_id )\n elif self.filetype==\"LARCV\":\n idbranchname = idtreename.replace(\"_tree\",\"_branch\")\n idbranch = None\n exec(\"idbranch=idtree.%s\"%(idbranchname))\n rse = ( idbranch.run(), idbranch.subrun(), idbranch.event() )\n elif self.filetype==\"RAWDIGITS\":\n rse = ( idtree.idx_run, idtree.idx_subrun, idtree.idx_event )\n self.rawdigits_entrymap[rse] = (idtree.entrystart, idtree.nentries )\n eventset.append(rse)\n if rse not in flavor_eventset[flavor]:\n flavor_eventset[flavor].append( rse )\n else:\n raise ValueError( \"found a repeated run/subrun/event index (%s). what?\"%( str(rse) ) )\n if self.filetype==\"RAWDIGITS\":\n # rawdigits has another tree index for the TPC\n tpcindex = r.Get(\"rawdigitwriter/IndexRawDigits\")\n for n in range(tpcindex.GetEntries()):\n tpcindex.GetEntry(n)\n rse = ( tpcindex.idx_run, tpcindex.idx_subrun, tpcindex.idx_event )\n self.rawdigits_tpcindex[rse] = (tpcindex.entrystart, tpcindex.nentries)\n \n eventset = tuple(eventset)\n if eventset not in events_to_files:\n events_to_files[eventset] = {}\n events_to_flavors[eventset] = []\n eventsets.append( eventset )\n events_to_files[eventset][flavor] = f\n events_to_flavors[eventset].append( flavor )\n del idtree\n r.Close()\n self.parsed = True\n\n # now we take our collection of event lists and\n # - sort the event lists\n # - make lists of files with the same set of events in the order of the sorted event list\n # - for each list we also make a dictionary between (run,subrun,event) index to the entry number\n # - we pick the list with the biggest number of events as the \"official\" file list\n eventsets.sort()\n flavorfiles = {}\n flavorsets = []\n\n flavorset_rse_dict = {}\n flavorset_entry_dict = {}\n for eventset in eventsets:\n events_to_flavors[eventset].sort() # sort the flavors with this event-set\n flavorset = tuple( events_to_flavors[eventset] )\n if flavorset not in flavorfiles:\n flavorfiles[flavorset] = []\n flavorsets.append(flavorset)\n flavorset_rse_dict[flavorset] = {}\n flavorset_entry_dict[flavorset] = {}\n for flavor in flavorset:\n flavorfiles[flavorset].append( events_to_files[eventset][flavor] )\n for rse in eventset:\n ientry = len( flavorset_rse_dict[flavorset] )\n flavorset_rse_dict[flavorset][rse] = ientry\n flavorset_entry_dict[flavorset][ientry] = rse\n\n # look for largest fileset\n maxset = None\n nfiles = 0\n for fset in flavorsets:\n n = len(flavorfiles[fset])\n if n>nfiles:\n nfiles = n\n maxset = fset\n # these are the final file list and event dictionary we want\n self.sorted_filelist = flavorfiles[maxset]\n self.rse_dict = flavorset_rse_dict[maxset]\n self.entry_dict = flavorset_entry_dict[maxset]\n\n # for rawdigits, we also build the entry to data map\n if self.filetype==\"RAWDIGITS\":\n treepos = 0\n treepos_tpc = 
0\n for entry in range(len(self.entry_dict)):\n rse = self.entry_dict[entry] \n # update OPDET tree\n pos_entries = self.rawdigits_entrymap[rse] # pos is from start of file, nentries is for the event block\n merged_pos_entries = ( treepos, pos_entries[1] )\n treepos += pos_entries[1]\n self.rawdigits_entrymap[rse] = merged_pos_entries # update \n # update TPC tree\n pos_entries = self.rawdigits_tpcindex[rse]\n merged_pos_entries = ( treepos_tpc, pos_entries[1] )\n treepos_tpc += pos_entries[1]\n self.rawdigits_tpcindex[rse] = merged_pos_entries # update",
"def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()",
"def load_data(npz_dir):\n files = glob.glob('%s/*.npz' % npz_dir)\n data_list = []\n for f in files:\n data_list += load_npz_to_data_list(f)\n return data_list",
"def get_data(tstart, tstop, year, grad_list, out_dir):\n print(\"Period: \" + str(tstart) + '<-->' + str(tstop) + ' in Year: ' + str(year))\n#\n#--- extract ecach group data\n#\n for group in grad_list:\n print(group)\n\n line = 'operation=retrieve\\n'\n line = line + 'dataset = mta\\n'\n line = line + 'detector = grad\\n'\n line = line + 'level = 0.5\\n'\n line = line + 'filetype = ' + group + '\\n'\n line = line + 'tstart = ' + str(tstart) + '\\n'\n line = line + 'tstop = ' + str(tstop) + '\\n'\n line = line + 'go\\n'\n\n data_list = mcf.run_arc5gl_process(line)\n#\n#--- read the first fits file and prep for the data list\n#\n [cols, tbdata] = ecf.read_fits_file(data_list[0])\n col_list = []\n for ent in cols:\n if ent.lower() == 'time':\n continue\n mc = re.search('st_', ent.lower())\n if mc is not None:\n continue\n\n col_list.append(ent)\n\n mcf.rm_files(data_list[0])\n tdata = tbdata['time']\n mdata = []\n for col in col_list:\n mdata.append(tbdata[col])\n#\n#--- read the rest of the data\n#\n clen = len(col_list)\n for k in range(1, len(data_list)):\n fits = data_list[k]\n [cols, tbdata] = ecf.read_fits_file(fits)\n tdata = numpy.append(tdata, tbdata['time'])\n\n for m in range(0, clen):\n cdata = tbdata[col_list[m]]\n mdata[m] = numpy.append(mdata[m], cdata)\n\n mcf.rm_files(fits)\n\n dout = out_dir + group.capitalize() + '/'\n\n if not os.path.isdir(dout):\n cmd = 'mkdir ' + dout\n os.system(cmd)\n#\n#--- write out the data to fits file\n#\n for k in range(0, clen):\n col = col_list[k]\n ocols = ['time', col.lower()]\n cdata = [tdata, mdata[k]]\n\n ofits = dout + col.lower()+ '_full_data_' + str(year) +'.fits'\n\n if os.path.isfile(ofits):\n ecf.update_fits_file(ofits, ocols, cdata)\n else:\n ecf.create_fits_file(ofits, ocols, cdata)\n\n#\n#--- zip the fits file from the last year at the beginning of the year\n#\n ecf.check_zip_possible(dout)"
]
| [
"0.6153713",
"0.6088378",
"0.60083175",
"0.59189504",
"0.58487946",
"0.57951975",
"0.57138914",
"0.571033",
"0.5709755",
"0.56899613",
"0.56622064",
"0.5660998",
"0.5647151",
"0.56439656",
"0.561935",
"0.561761",
"0.56089336",
"0.56031334",
"0.55937725",
"0.5593059",
"0.55671346",
"0.5539171",
"0.55324036",
"0.55302376",
"0.553012",
"0.55125934",
"0.55112016",
"0.55108875",
"0.55062246",
"0.5496775"
]
| 0.6675086 | 0 |
Check whether this field is nullable, i.e., can be `None`. | def is_nullable(self) -> bool: # pragma: no cover
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_nullable(self) -> bool:\n return self.__nullable",
"def is_nullable(self):\n return self._is_nullable",
"def is_nullable(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return schema_obj.is_nullable\n return False",
"def is_null(self):\n return self.value is None",
"def is_field_nullable(\n nullable: Optional[bool],\n default: Any,\n server_default: Any,\n pydantic_only: Optional[bool],\n) -> bool:\n if nullable is None:\n return (\n default is not None\n or server_default is not None\n or (pydantic_only is not None and pydantic_only)\n )\n return nullable",
"def isNullable(self):\n if self.isPrimaryKey():\n return False\n else:\n return self._nullable",
"def allow_null_values(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_null_values\")",
"def _val_is_null(self, val):\r\n return val is None",
"def is_nullable_type(self):\n raise exceptions.NotImplementedError()",
"def isValid(self, value):\n return value is None if self._onlyNullAllowed else value is not None",
"def is_null(value: Any) -> bool:\n return not value",
"def is_not_none(e):\n return e is not None",
"def not_null(value):\n if value is None or value == \"\":\n raise ValidationError(\"The value must not be None\")\n\n return True",
"def is_null(val):\n return (val is None)",
"def is_nullable(self, col_name: str) -> bool:\n return col_name in self._nullable",
"def isNull(self):\n return self.__column is None",
"def _is_null_value(self, value):\n if value is None:\n return True\n\n if IS_PY3:\n # Python 3.X\n if isinstance(value, str) and len(value) == 0:\n return True\n else:\n # Python 2.X\n if isinstance(value, basestring) and len(value) == 0: # NOQA: F821\n return True\n\n # TODO: This should probably be removed when solved in core Solr level?\n return False",
"def allow_null_values(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_null_values\")",
"def __null_check(self, record_attribute, attribute_schema):\n if attribute_schema[NULLABLE_KEY]:\n return True\n elif record_attribute is not None:\n return True\n else:\n IS_VALID_FILE = False\n return False",
"def isNullAllowed(self):\n return self._onlyNullAllowed",
"def is_none(obj):\n return obj is None",
"def is_null(cls, json_field: str) -> \"JsonPattern\":\n return jsii.sinvoke(cls, \"isNull\", [json_field])",
"def _validate_on_value(self, value: Any) -> None:\n if not self._is_nullable and value is None:\n msg = \"Non-nullable field cannot have None value\"\n if self._resolve_field_name() is not None:\n msg += f\" (field name = '{self._resolve_field_name()}')\"\n raise FieldValueValidationError(msg)",
"def not_none(value):\n return not value is None",
"def isnull(obj):\n return _isnull(obj)",
"def get_postgres_is_nullable(self, tablename, column_name: str) -> bool:\n return (\n self.get_postgres_column_definition(\n tablename=tablename, column_name=column_name\n ).is_nullable.upper()\n == \"YES\"\n )",
"def is_optional(self) -> bool:\n return bool(AnnotationWrapper.opt_field_re.match(self.data))",
"def is_null(self):\n return self._internal_handle() == 0",
"def is_null(self):\n return self._internal_handle() == 0",
"def is_null(self):\n return self._internal_handle() == 0"
]
| [
"0.8449764",
"0.83130455",
"0.786248",
"0.7661689",
"0.74965847",
"0.7488365",
"0.7440954",
"0.7377148",
"0.73132867",
"0.71837443",
"0.7122148",
"0.71193844",
"0.71191525",
"0.70252806",
"0.6993334",
"0.6983965",
"0.6930141",
"0.69181836",
"0.68292755",
"0.6795569",
"0.678991",
"0.6783171",
"0.6769086",
"0.67507786",
"0.6563241",
"0.6555164",
"0.65267783",
"0.6521993",
"0.6521993",
"0.6521993"
]
| 0.84807235 | 0 |
Check if an object is the type of data model class that this model adapter works with. | def is_model_type(obj: Any) -> bool: # pragma: no cover
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv",
"def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)",
"def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')",
"def _is_user_class(obj):\n type_dict = type(obj).__dict__\n is_user_class = '_pandas_type' in type_dict\n return is_user_class",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def _isinstance(self, instance, raise_error=True):\n\n if isinstance(instance, self.__model__):\n return True\n elif raise_error:\n raise ValueError('{} is not of type {}.'.format(\n instance, self.__model__,\n ))\n else:\n return False",
"def isclass(object):\r\n return isinstance(object, (type, types.ClassType))",
"def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType",
"def is_object_type(self):\n raise exceptions.NotImplementedError()",
"def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)",
"def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))",
"def is_object(value, class_name):\n\n return isinstance(value, getattr(schema, class_name))",
"def test_isinstance(self):\n obj = BaseModel()\n self.assertIsInstance(obj, BaseModel)",
"def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'",
"def predicate(obj):\n return inspect.isclass(obj) and issubclass(obj, MafColumnRecord)",
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()",
"def match(self, cls):\n return isinstance(self, cls)",
"def applies(cls, obj):\n return type(obj) in cls.types",
"def _isinstance(cls, x):\n return isinstance(x, cls.PYTHON_TYPE_CHECK)",
"def is_type(obj: Any) -> bool:\n return type(obj).__name__ == \"type\"",
"def _is_typing_object(type_object):\n return type_object.__module__ == \"typing\"",
"def object_type_present(self, object_type):\n # Check input.\n if not isinstance(object_type, str):\n raise TypeError('object_type must be a string.')\n\n # Lookup object type and return.\n return object_type in self.model_map['object']",
"def _is_object_type(df, field):\n return df[field].dtype.name == 'object'",
"def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True",
"def check_type(self):\n return True",
"def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))",
"def is_peewee_model(obj) -> bool:\n return (inspect.isclass(obj) and\n issubclass(obj, peewee.Model) and\n not obj == peewee.Model and\n not obj.__name__.startswith('_'))",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)",
"def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)"
]
| [
"0.79185575",
"0.75479585",
"0.75399536",
"0.7518996",
"0.7395414",
"0.7328052",
"0.71795946",
"0.7038402",
"0.7015072",
"0.68881327",
"0.6878079",
"0.68056995",
"0.67854327",
"0.67750436",
"0.6754461",
"0.6694614",
"0.6690304",
"0.66749346",
"0.66486806",
"0.66303533",
"0.6605912",
"0.6588578",
"0.6582805",
"0.6494985",
"0.64883286",
"0.64537454",
"0.64487773",
"0.64129096",
"0.6396754",
"0.6396754"
]
| 0.78210646 | 1 |
Docstring for this data model. | def docstring(self) -> str:
out = f"{self.model.__module__}.{self.model.__qualname__}"
docstring = inspect.getdoc(self.model)
if docstring:
out += "\n\n" + docstring
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def model_definition(self):\n pass",
"def description(self) -> str:\n raise NotImplementedError",
"def description(self) -> str:\n raise NotImplementedError",
"def description(self) -> str:\n raise NotImplementedError",
"def description(self) -> str:\r\n raise NotImplementedError",
"def description(self) -> str:\r\n raise NotImplementedError",
"def description(self) -> str:\r\n raise NotImplementedError",
"def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )",
"def __repr__(self) -> str:\n return f\"<Doc[{self.desc}]>\"",
"def description(self):\n pass",
"def description(self):\n pass",
"def description(self):",
"def describe(self):\n raise NotImplementedError()",
"def describe(self):\n raise NotImplementedError()",
"def __str__(self):\n return \"Description(values={},data_model={})\".format(\n self._values, self.data_model\n )",
"def description(self) -> str:\n pass",
"def description(cls) -> str:\n\n return cls.__doc__ or \"\"",
"def __str__(self):\r\n desc = f'{self.name}\\n' + '-' * len(self.name) + '\\n'\r\n desc += f'Contains {len(self)} data entries.\\n'\r\n if self.is_superset:\r\n for ds in self.data:\r\n desc += f'\\tSubset {ds.name}: {len(ds)} entries.\\n'\r\n return desc",
"def __repr__(self):\n doc_string = \"# %s class description:\\n%s\\n# Instance attributes:\\n\" % (self.__class__, self.__doc__)\n # write each argument with its value\n properties = dir(self)\n for elem in properties:\n if not elem.startswith(\"_\"):\n doc_string += \"\\t%s:%s\\n\" % (elem, self.__getattribute__(elem))\n return doc_string",
"def describe(self) -> Text:\n return self.__repr__()",
"def describe(self):\n return str(self)",
"def __str__(self):\n return self.__class__.__name__ + '\\n' + self.__class__.__doc__",
"def __metadata__(self):\n raise NotImplementedError",
"def data(self):\r\n raise NotImplementedError",
"def description(self):\n return (self.__doc__ or \"\").strip()",
"def dataDescription(self, role):\n return None",
"def get_description(self):\n raise NotImplementedError",
"def getDescription(self):\n raise NotImplementedError",
"def Description(self) -> str:",
"def Description(self) -> str:"
]
| [
"0.6768882",
"0.674206",
"0.674206",
"0.674206",
"0.6739793",
"0.6739793",
"0.6739793",
"0.66971123",
"0.66580635",
"0.6608247",
"0.6608247",
"0.6567727",
"0.65560997",
"0.65560997",
"0.65407693",
"0.6489474",
"0.64783865",
"0.64439154",
"0.64217913",
"0.639244",
"0.6386248",
"0.6368312",
"0.6362589",
"0.6325747",
"0.6306453",
"0.62908626",
"0.628294",
"0.6277388",
"0.62385803",
"0.62385803"
]
| 0.6756573 | 1 |
Returns the DOT language "HTML-like" syntax specification of a table for this data model. It is used as the `label` attribute of the data model's node in the graph's DOT representation. | def dot_label(self) -> str:
rows = "\n".join(field.dot_row() for field in self.fields)
return _table_template.format(name=self.name, rows=rows).replace("\n", "") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _repr_html_(self):\n return (\n f'<b>{self.__class__.__name__}</b>'\n f'<br> <b>defined by:</b> {self._str_meta_()}'\n f'<br> <b>with columns:</b> {self._str_colnames()}'\n f'<br> {len(self)} objects'\n f'<br> {self._html_table()}'\n )",
"def build_dot_str(self) -> Text:\n s = []\n s.append(\"digraph {\")\n for node in self.nodes:\n label = str(node)\n if node in self.start_nodes:\n label += \"S\"\n if node in self.accept_nodes:\n label += \"A\"\n s.append(f' \"{node}\" [label=\"{label}\"];')\n s.append(\"\")\n for from_node, transitions in self.nodes.items():\n for transition, to_nodes in transitions.items():\n if not transition:\n transition = \"ε\"\n for to_node in to_nodes:\n s.append(f' \"{from_node}\" -> \"{to_node}\" [label=\"{transition}\"];')\n s.append(\"}\")\n return \"\\n\".join(s)",
"def _repr_html_(self):\n return html_table(self)",
"def html_data_table(self):\n return \"XXX\"",
"def dot_string(self) -> str:\n\n ret = \"{0}[label = \\\"{1}\\\"];\\n\".format(self._index, self.label)\n ret += \"{0}--{{\".format(self._index)\n ret += ''.join([f'{child.index} ' for child in self._children])\n ret += \"}\\n\"\n self._is_drawn = True\n ret += ''.join([child.dot_string() for child in self._children])\n\n return ret",
"def __str__(self):\n ts_rows = [\n [\"Trees\", str(self.num_trees)],\n [\n \"Sequence Length\",\n str(\n int(self.sequence_length)\n if self.discrete_genome\n else self.sequence_length\n ),\n ],\n [\"Time Units\", self.time_units],\n [\"Sample Nodes\", str(self.num_samples)],\n [\"Total Size\", util.naturalsize(self.nbytes)],\n ]\n header = [\"Table\", \"Rows\", \"Size\", \"Has Metadata\"]\n table_rows = []\n for name, table in self.tables.table_name_map.items():\n table_rows.append(\n [\n str(s)\n for s in [\n name.capitalize(),\n table.num_rows,\n util.naturalsize(table.nbytes),\n \"Yes\"\n if hasattr(table, \"metadata\") and len(table.metadata) > 0\n else \"No\",\n ]\n ]\n )\n return util.unicode_table(ts_rows, title=\"TreeSequence\") + util.unicode_table(\n table_rows, header=header\n )",
"def _repr_html_(self):\n return \"<td><b>{0}</b></td><td>{1}</td>\".format(self.id, self.title)",
"def as_html(self): # pragma: no cover\n\n return render_to_string(\n self._meta.template,\n { \"table\": self } )",
"def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore",
"def __str__(self):\n return '<table summary=\"%s\" class=\"%s\" %s>\\n%s </table>\\n' % (self.summary, self.cssclass, self.options, self.text)",
"def dyntable(self):\n table = ANSITable(\n Column(\"j\", colalign=\">\", headalign=\"^\"),\n Column(\"m\", colalign=\"<\", headalign=\"^\"),\n Column(\"r\", colalign=\"<\", headalign=\"^\"),\n Column(\"I\", colalign=\"<\", headalign=\"^\"),\n Column(\"Jm\", colalign=\"<\", headalign=\"^\"),\n Column(\"B\", colalign=\"<\", headalign=\"^\"),\n Column(\"Tc\", colalign=\"<\", headalign=\"^\"),\n Column(\"G\", colalign=\"<\", headalign=\"^\"), border=\"thin\")\n\n for j, link in enumerate(self):\n table.row(link.name, *link._dyn2list())\n return str(table)",
"def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))",
"def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))",
"def create_styled_dot_file(user_list, edge_list):\n \n NODE = '$id [label=< <table border=\"0\" cellborder=\"0\" cellspacing=\"0\"'\\\n ' bgcolor=\"#CCCCCC\"> <tr> <td colspan=\"2\" cellpadding=\"2\"'\\\n ' align=\"center\" bgcolor=\"#33CCFF\"> <font face=\"Helvetica Bold\">'\\\n '$id</font> </td> </tr> $rows </table> >]'\n ATTRIBUTE = '<tr> <td align=\"left\" cellpadding=\"2\"><font face=\"Helvetica'\\\n ' Bold\">$key</font></td> <td align=\"left\" cellpadding=\"2\">$value'\\\n '</td> </tr>'\n node = Template(NODE)\n attribute = Template(ATTRIBUTE)\n nodes = '\\n'.join([node.substitute(id=u,\n rows='\\n'.join([attribute.substitute(key=k, value=v) for k, v in\n d.iteritems()])) for u, d in user_list.iteritems()])\n edges = ' '.join(['%s -> %s;'%(src, tgt) for src, tgt in edge_list])\n graph = 'digraph G { node [ fontname = \"Helvetica\" fontsize = 8 shape ='\\\n ' \"plaintext\" ] %s %s }'%(nodes, edges)\n return graph",
"def __repr__(self) -> str:\n\t\trep_str: str\n\t\theader_char: str\n\t\tcode_name = codify(self.name)\n\n\t\t# Get header character, with nested nodes under their parent\n\t\tif self.nested:\n\t\t\theader_char = \"^\"\n\t\telse:\n\t\t\theader_char = \"-\"\n\n\t\t# Get the header, set as start of string\n\t\trep_str = get_header(code_name, header_char)\n\t\t# Add description\n\t\trep_str += self.__str__()\n\t\t# Add newlines\n\t\trep_str += get_line_padding(2)\n\n\t\t# Handle nested nodes\n\t\tif self.has_nodes():\n\t\t\t# Setup\n\t\t\tnested_nodes = self.get_nodes()\n\t\t\tnested_reps = \"\"\n\n\t\t\t# Start Nested Nodes table\n\t\t\trep_str += get_table_start((\n\t\t\t\t\"Nested Nodes\",\n\t\t\t\t\"Description\",\n\t\t\t))\n\n\t\t\t# Iterate over nested node names\n\t\t\tfor nested_node in nested_nodes:\n\t\t\t\t# Add to table\n\t\t\t\trep_str += get_table_line((codify(nested_node.name), str(nested_node)))\n\t\t\t\t# Store full node sections, will be added after table\n\t\t\t\tnested_reps += repr(nested_node)\n\n\t\t\t# End Nested Nodes Table\n\t\t\trep_str += get_table_end(1)\n\n\t\t\t# Add stored sections\n\t\t\trep_str += nested_reps\n\n\t\t# Handle SubNodes, should be present\n\t\tif self.has_subnodes():\n\t\t\t# Get header, different if nested\n\t\t\tif self.nested:\n\t\t\t\trep_str += get_header(\"{}'s SubNodes\".format(code_name), '\"')\n\t\t\telse:\n\t\t\t\trep_str += get_header(\"SubNodes\", \"^\")\n\n\t\t\t# Iterate over SubNodes\n\t\t\tfor curr_subnode in self.get_subnodes():\n\t\t\t\trep_str += repr(curr_subnode)\n\n\t\t\t# Add extra newline\n\t\t\trep_str += get_line_padding(1)\n\n\t\t# Add final newline\n\t\trep_str += get_line_padding(1)\n\t\t# Return\n\t\treturn rep_str",
"def _intermediary_to_dot(tables, relationships):\n t = '\\n'.join(t.to_dot() for t in tables)\n r = '\\n'.join(r.to_dot() for r in relationships)\n return '{}\\n{}\\n{}\\n}}'.format(GRAPH_BEGINNING, t, r)",
"def _html_repr(self):\n html = '<table id=%s>' % (self._id,)\n\n for row in range(self.rows):\n html += '<tr>'\n for col in range(self.columns):\n if row == 0 and self.header_row or col == 0 and self.header_column:\n tag = 'th'\n else:\n tag = 'td'\n html += '<%(tag)s id=%(id)s></%(tag)s>' % {\n 'tag': tag,\n 'id': self._get_cell_id(row, col),\n }\n html += '</tr>'\n html += '</table>'\n return html",
"def __repr__(self) -> str:\n\t\t# Get Header\n\t\trep_str: str = get_header(self.name, \"*\")\n\n\t\t# Get table for Nodes in XML Type in a table, only if present\n\t\tif len(self.node_names):\n\t\t\tnode_info = \"\"\n\n\t\t\t# Start Nodes Table\n\t\t\trep_str += get_table_start((\"Node\", \"Description\"))\n\t\t\t# Iterate over Nodes\n\t\t\tfor node in self.get_nodes():\n\t\t\t\trep_str += get_table_line((codify(node.name), str(node)))\n\t\t\t\tnode_info += repr(node)\n\t\t\trep_str += get_table_end(1)\n\n\t\t\t# Add node docs\n\t\t\trep_str += node_info\n\n\t\t# Get all SubNodes if no nodes are present\n\t\telse:\n\t\t\t# Get all SubNode Descriptions\n\t\t\trep_str += get_header(\"Direct SubNodes\", \"-\")\n\t\t\tfor subnode in self.get_subnodes():\n\t\t\t\trep_str += repr(subnode)\n\n\t\t# Return\n\t\treturn rep_str + \"\\n\"",
"def _html_table(self):\n return '</i>'.join(APtable._repr_html_(self).split('</i>')[1:])",
"def __str__(self):\n tree_rows = [\n [\"Index\", str(self.index)],\n [\n \"Interval\",\n f\"{self.interval.left:.8g}-{self.interval.right:.8g} ({self.span:.8g})\",\n ],\n [\"Roots\", str(self.num_roots)],\n [\"Nodes\", str(len(self.preorder()))],\n [\"Sites\", str(self.num_sites)],\n [\"Mutations\", str(self.num_mutations)],\n [\"Total Branch Length\", f\"{self.total_branch_length:.8g}\"],\n ]\n return util.unicode_table(tree_rows, title=\"Tree\")",
"def gremlin(self):\r\n initial = '{} = g.makeType().name(\"{}\").{}{}makeEdgeLabel()'\r\n primary_key = ''\r\n if self.primary_key:\r\n primary_key = \"primaryKey({}).\".format(self.primary_key)\r\n\r\n functional = \"functional().\" if self.functional else \"\"\r\n\r\n return initial.format(self.label, self.label, primary_key, functional)",
"def __repr__(self: 'DotTree') -> str:\n return 'DotTree({}, {})'.format(repr(self.children[0]), \n repr(self.children[1]))",
"def dot_row(self) -> str:\n return _row_template.format(name=self.name, type_name=self.type_name)",
"def to_latex_table(self, tab=\" \", caption=\"TODO\", label=\"TODO\"):\n return \"\".join(\n (\n \"\\\\begin{center}\\n\",\n f\"{tab}\\\\begin{{table}}[ht]\\n\",\n f\"{tab*2}\\\\centering\\n\",\n f'{tab*2}\\\\rowcolors{{2}}{{white}}{{gray!25}}\\n'\n f\"{tab*2}\\\\begin{{tabular}}{{crrrrrr}}\\n\",\n (\n f\"{tab*3}\\\\cellcolor[gray]{{0.7}} & \\\\multicolumn{{2}}{{c}}\"\n \"{BT\\\\cellcolor[gray]{0.7}} & \\\\multicolumn{2}{c}{BJ\"\n \"\\\\cellcolor[gray]{0.7}} & \\\\multicolumn{2}{c}\"\n \"{CBJ\\\\cellcolor[gray]{0.7}} \\\\\\\\\\n\"\n ),\n (\n f\"{tab*3}\\\\cellcolor[gray]{{0.7}} Test suite & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)}\\\\\\\\\\n\"\n ),\n \"\".join(\n (\n f\"{tab*3}{i} & {bt.nodes_expanded} & {bt.time} \"\n f\"& {bj.nodes_expanded} & {bj.time} & {cbj.nodes_expanded} & \"\n f\"{cbj.time}\\\\\\\\\\n\"\n for i, (bt, bj, cbj) in enumerate(zip(*self.data))\n )\n ),\n f\"{tab*2}\\\\end{{tabular}}\\n\"\n f\"{tab*2}\\\\caption{{{caption}}}\\n\"\n f\"{tab*2}\\\\label{{tab:{label}}}\\n\"\n f\"{tab}\\\\end{{table}}\\n\"\n \"\\\\end{center}\",\n )\n )",
"def _repr_markdown_(self):\n mod = f'**{self.__class__.__name__} Model**'\n try:\n mod +=f': {self.filename}'\n except:\n pass\n s = [mod,'']\n if self.metadata:\n s += ['|Parameter|Value|',\n '|:--------|:----:|']\n for name, v in self.metadata.items():\n try: \n s += [f\"|{name} | ${v.value:g}$ {v.unit:latex}|\"]\n except:\n s += [f\"|{name} | {v} |\"]\n return '\\n'.join(s)",
"def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)",
"def _get_tabletype(cls) -> str:\n return 'HTML'",
"def markdown_table(self, which):\n if which == 'C':\n coef = 'C'\n elif which == 'c':\n coef = 'c'\n elif which == 'f':\n coef = 'f'\n str = '|order|'\n for i in range(1,N+1):\n str = str + '$%s_{%d}$ |' % (coef,i)\n str = str + '\\n|'\n for i in range(1,N+1):\n str = str + '-|'\n str = str + '\\n'\n for i in range(1,self.N+1):\n str = str + (self.dat[i]).markdown_row(self.N, which)\n return str",
"def dot(self) -> str:\n dot = to_pydot(self._graph)\n return dot.to_string()",
"def htmlise(s):\n return '<div><pre class=\"tablecell\">' + html.escape(s) + '</pre></div>'"
]
| [
"0.61612487",
"0.61515945",
"0.61384416",
"0.61309093",
"0.61250234",
"0.6045786",
"0.60448545",
"0.59829056",
"0.59372824",
"0.5933345",
"0.5923659",
"0.5885648",
"0.5885648",
"0.58676755",
"0.5851817",
"0.58498627",
"0.58488524",
"0.58396506",
"0.5816444",
"0.57942003",
"0.57005954",
"0.5699299",
"0.56961066",
"0.5663159",
"0.5661389",
"0.56425595",
"0.5620379",
"0.5617764",
"0.55712247",
"0.5569057"
]
| 0.7110429 | 0 |
Create decorator to register a concrete [`Model`][erdantic.base.Model] adapter subclass that will be identified under the key `type_name`. A concrete `Model` subclass must be registered for it to be available to the diagram creation workflow. | def register_model_adapter(type_name: str) -> Callable[[Type[Model]], Type[Model]]:
def decorator(cls: type) -> type:
global model_adapter_registry
if not issubclass(cls, Model):
raise ValueError("Only subclasses of Model can be registered.")
model_adapter_registry[type_name] = cls
return cls
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RegisterModel(model_name):\n\n def decorator(f):\n MODEL_REGISTRY[model_name] = f\n return f\n\n return decorator",
"def register_model(name):\n\n def register_model_cls(cls):\n if name in MODEL_REGISTRY:\n raise ValueError('Cannot register duplicate model ({})'.format(name))\n MODEL_REGISTRY[name] = cls\n return cls\n\n return register_model_cls",
"def addModelType(convertContext, typeName, convertFunc):\n\tif not hasattr(convertContext, 'modelTypeMap'):\n\t\tconvertContext.modelTypeMap = dict()\n\n\tif typeName in convertContext.modelTypeMap:\n\t\traise Exception('Model type \"' + typeName + '\" is already registered.')\n\tconvertContext.modelTypeMap[typeName] = convertFunc",
"def register_model(name: str, model=None):\n global REGISTRY\n if model is not None:\n REGISTRY[name] = model\n return model\n\n def do_registration(model):\n REGISTRY[name] = model\n return model\n\n return do_registration",
"def set_model(*, name: str, model: typing.Type) -> None:\n setattr(open_alchemy.models, name, model)",
"def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)",
"def from_config(cls, model_config: Union[dict, ModelConfig]) -> Type[AbstractModel]:\n\n if not (model_config and isinstance(model_config, (ModelConfig, dict))):\n msg = f\"Need a valid model config to create a text/tagger model in AutoModel. \" \\\n f\"Found model_config={model_config} of type({type(model_config)})\"\n raise ValueError(msg)\n\n # get model type upon validation\n model_config = cls._resolve_model_config(model_config)\n model_type = cls._get_model_type(model_config)\n\n # load metadata and return\n if model_type == \"text\":\n model_class = AutoTextModel.get_model_class(model_config)\n elif model_type == \"tagger\":\n model_class = AutoTaggerModel.get_model_class(model_config)\n\n return model_class(model_config)",
"def model(self) -> Type[Model]:",
"def create_reid_model(name, *args, **kwargs):\r\n if name not in __factory:\r\n raise KeyError(\"Unknown model:\", name)\r\n return __factory[name](*args, **kwargs)",
"def modelClass(self):\n raise NotImplementedError",
"def get_model_adapter(config):\n if config['task'] == 'joint':\n return JointModelAdapter()\n elif config['task'] == 'keypoints':\n return KeypointsModelAdapter()\n elif config['task'] == 'headsegmentation':\n return HeadSegmentationModelAdapter()\n elif config['task'] == 'detect':\n return DetectionModelAdapter(config['model'])\n return ClassificationModelAdapter()",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def model(self, key, model_type:T, default=undefined, description=None, **kwargs) -> T:\n return self._process(key, description=description, default=default, cast=cast_pydantic(model_type),type=model_type, **kwargs)",
"def initialize_model(model_type, **kwargs):\n try:\n model_class = MODEL_DICT[model_type]\n except KeyError:\n raise RuntimeError(f\"Cannot find model class for {model_type}. Pick one of {list(MODEL_DICT.keys())}\")\n\n return model_class(**kwargs)",
"def register(self, py_type, visitor=None):\n if visitor:\n self[py_type] = visitor\n else:\n\n def decorator(f):\n self[py_type] = f\n return f\n\n return decorator",
"def register(self, model, mediator=None, for_concrete_model=True):\n if mediator is None:\n mediator = ActivityMediator()\n if not isinstance(mediator, ActivityMediator):\n raise AttributeError(\"'mediator' argument required to be an \"\n \"instance of ActivityMediator (sub)class.\")\n # connect the model to the overseer\n mediator.connect(model)\n # register the mediator\n opts = self._get_opts(model, for_concrete_model)\n natural_key = \"{}.{}\".format(\n opts.app_label,\n opts.model_name,\n )\n self._registry[natural_key] = mediator",
"def register(widget):\n w = widget.class_traits()\n _registry.register(w['_model_module'].default_value,\n w['_model_module_version'].default_value,\n w['_model_name'].default_value,\n w['_view_module'].default_value,\n w['_view_module_version'].default_value,\n w['_view_name'].default_value,\n widget)\n return widget",
"def register_model(self, model: Type[Model]):\n\n if not model in self._registered_models:\n self._registered_models.add(model)",
"def register_model(self, model):\n\n self._model = model",
"def register(cls, model):\n cls.models[model] = True",
"def register(account_type):\n def class_rebuilder(cls):\n ACCOUNT_TYPES[account_type] = cls\n return cls\n return class_rebuilder",
"def _default_make_sa_model(model):\n name = model._meta.object_name + \".__aldjemy__\"\n return type(name, (), {\"__module__\": model.__module__})",
"def register_type(self, type_name, mapping, model=None):\n assert type_name not in self.mappings\n self.mappings[type_name] = TypeMapping(type_name, mapping, model)",
"def register_config(type_hint: str,\n plugin: Optional[str] = None,\n upgrader: Optional[Callable] = None) -> Callable:\n\n def _register_config(cls: Type):\n new_cls = create_model(\n cls.__name__,\n __base__=cls,\n __module__=cls.__module__,\n # add a new field called \"type_hint\" with type Literal[type_hint]\n # and default value type_hint to the config\n type_hint=(Literal[type_hint], type_hint), # type: ignore\n )\n\n _plugin = plugin or get_plugin(cls)\n registry.add_config(type_hint, new_cls, _plugin, upgrader)\n\n # retain docstring after wrapping\n new_cls.__doc__ = cls.__doc__\n\n return new_cls\n\n return _register_config",
"def register_model(self, model_name: str, model: Any, training_columns: List[str]):\n self.models[model_name] = (model, training_columns)",
"def __init__(self, model: str, **kwargs):\n super().__init__(model=model)",
"def get_model_type(self):\n pass",
"def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return recurrent_model(model(*args), **kwargs)\n\n return wrapper",
"def modelfactory_factory(model: Type[Model], **kwargs: Any) -> Type[MIZModelFactory]:\n model_name, label = model._meta.model_name, model._meta.label\n # Check the cache for a factory with that label.\n if label in _cache:\n return _cache[label]\n # Check this module and the factory's base module for a factory\n # matching the default factory name.\n # Note that a factory's name may not be unique across multiple apps; need\n # to verify that any factory matching the name is a factory for the\n # requested model.\n factory_name = model_name.capitalize() + 'Factory'\n if hasattr(sys.modules[__name__], factory_name):\n modelfac = getattr(sys.modules[__name__], factory_name)\n if modelfac._meta.model == model:\n return modelfac\n # TODO: is it safe to use sys.modules to check for contents of a external\n # module, or should an import be attempted instead?\n # -- ALSO: why even bother checking factory.base?\n if hasattr(sys.modules['factory.base'], factory_name):\n modelfac = getattr(sys.modules['factory.base'], factory_name)\n if modelfac._meta.model == model:\n return modelfac\n # Create a new factory class:\n if 'Meta' not in kwargs:\n kwargs['Meta'] = type('Options', (MIZDjangoOptions,), {'model': model})\n modelfac = type(factory_name, (MIZModelFactory,), kwargs)\n _cache[label] = modelfac\n return modelfac"
]
| [
"0.66247785",
"0.6454164",
"0.61407036",
"0.6008762",
"0.57344675",
"0.5627918",
"0.55794394",
"0.5565715",
"0.5555287",
"0.5550958",
"0.55120885",
"0.5458099",
"0.5458099",
"0.54393405",
"0.54377514",
"0.54216254",
"0.5397311",
"0.53699",
"0.5312761",
"0.53078943",
"0.5251868",
"0.52442527",
"0.52439153",
"0.5223513",
"0.52100646",
"0.51825213",
"0.5182318",
"0.51230407",
"0.51214105",
"0.5114054"
]
| 0.8330304 | 0 |
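As an illustration of the registration workflow described above, here is a minimal, self-contained sketch: the `Model` base class and the module-level registry are stand-ins, and the "pydantic" key is only an example key, not necessarily one the real package uses.

from typing import Callable, Dict, Type

class Model:  # stand-in for the abstract adapter base class
    pass

model_adapter_registry: Dict[str, Type[Model]] = {}

def register_model_adapter(type_name: str) -> Callable[[Type[Model]], Type[Model]]:
    def decorator(cls: type) -> type:
        if not issubclass(cls, Model):
            raise ValueError("Only subclasses of Model can be registered.")
        model_adapter_registry[type_name] = cls  # key -> concrete adapter class
        return cls
    return decorator

@register_model_adapter("pydantic")  # example key
class PydanticModel(Model):
    pass

assert model_adapter_registry["pydantic"] is PydanticModel
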
A product token for use in UserAgent headers. | def product_token(self):
return 'gtr/{0}'.format(__version__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cart_token(self):\n return self._dict.get('cart_token')",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def token(self) -> str:\n return pulumi.get(self, \"token\")",
"def product(self) -> str:\n return pulumi.get(self, \"product\")",
"def generate_product_number():\n return str(uuid.uuid4())",
"def _generate_token_value():\n return secrets.token_urlsafe()",
"def token(self):\n return self[\"token\"]",
"async def token(request) -> ResponseText:\n return ResponseText(\n \"\".join(random.choices(string.ascii_uppercase + string.digits, k=42)) # noqa: S311\n )",
"def get_token(self):\n if time.time() > self.expiration:\n # need to re-authenticate and get a new token and catalog\n self._authenticate()\n \n return self.token, self.catalog",
"def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()",
"def get_client_token(**_):\n return str(uuid.uuid4())",
"def token():\n return os.environ.get('TOKEN', None)",
"def device_token(self):\n return self._device_token",
"def product_version(context):\n return {'PRODUCT_VERSION': settings.PRODUCT_VERSION}",
"def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token",
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n return self._generate_jwt_token()",
"def token(self):\n return self._generate_jwt_token()",
"def product_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_id\")",
"def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()",
"def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}",
"def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()",
"def UserToken(self) -> object:",
"def auth_token(self):",
"def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id",
"def generate_token():\n return uuid4()",
"def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]"
]
| [
"0.6108767",
"0.60351366",
"0.60351366",
"0.60351366",
"0.59434474",
"0.5846661",
"0.5842875",
"0.5732221",
"0.56980956",
"0.56795764",
"0.5656738",
"0.5639308",
"0.5635904",
"0.5624893",
"0.5620505",
"0.5617921",
"0.56037295",
"0.56037295",
"0.56037295",
"0.55853003",
"0.55840343",
"0.5570505",
"0.55638206",
"0.5554782",
"0.55541277",
"0.55388635",
"0.5536706",
"0.5535999",
"0.5531673",
"0.5524629"
]
| 0.80278295 | 0 |
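A brief, hypothetical usage sketch for the product token above; the surrounding HTTP client is not shown, so pairing it with a requests session and the requests product token is an assumption.

import requests

__version__ = "0.1.0"  # placeholder; the real package version is not shown

def product_token() -> str:
    # "product/version" token as used in User-Agent headers
    return 'gtr/{0}'.format(__version__)

session = requests.Session()
session.headers["User-Agent"] = "{0} python-requests/{1}".format(
    product_token(), requests.__version__)
print(session.headers["User-Agent"])  # e.g. "gtr/0.1.0 python-requests/2.31.0"
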
Return all hidden inputs in the elements list as a dict. | def _hidden_inputs_as_dict(self, elements):
data = {}
# Make sure elements is a list
if not isinstance(elements, list):
elements = [elements]
for element in elements:
for input in element.select('input[type=hidden]'):
data[input.attrs['name']] = input.attrs.get('value', '')
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hidden(self, soup):\n hidden = soup.find_all(\"input\", {\"type\": \"hidden\"})\n return {field[\"name\"]: field[\"value\"] for field in hidden}",
"def get_inputs(self):\n inputs = self.view.main_panel.get_inputs()\n result = {}\n for _input in inputs:\n value = inputs[_input]\n if \"bool\" in _input and not isinstance(value, bool):\n value = value.get()\n result[_input] = value\n return result",
"def get_inputs():\n inputs = {}\n for obj in vars(acsploit.input).values():\n if hasattr(obj, 'INPUT_NAME'):\n inputs[obj.INPUT_NAME] = obj\n\n return inputs",
"def to_dict(self) -> List[Dict[str, Any]]:\n return [x.to_dict() for x in self.inputs]",
"def get_inputs(self):\n return self.attributes[\"inputs\"]",
"def as_dict(self):\n for k, v in zip(self._input_names, self._flattened_inputs):\n yield k, v",
"def inputs(self):\n return self._inputs",
"def make_param_inputs(g, node, layer, hidden_size, num_layers):\n\n bidirect_len = 4 if node.attr(\"is_bidirec\") else 2\n all_layer_param_len = len(node.input(\"WeightList\"))\n weight_list = node.input(\"WeightList\")[: all_layer_param_len // 2]\n bias_list = node.input(\"WeightList\")[all_layer_param_len // 2 :]\n\n layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n param_list = layer_weight_list + layer_bias_list\n param_list_len = len(param_list)\n\n input_weights = param_list[0 : param_list_len // 2 : 2]\n hidden_weights = param_list[1 : param_list_len // 2 : 2]\n\n input_bias = param_list[param_list_len // 2 : param_list_len : 2]\n hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]\n\n return input_weights, hidden_weights, input_bias, hidden_bias",
"def input_fields(self):\r\n return self.input.fields",
"def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))",
"def get_inputs(self):\n for widget in self.frame.winfo_children():\n if isinstance(widget, tkinter.Entry):\n entry_name = str(widget).split('.')[-1]\n entry_value = widget.get()\n self.items[entry_name] = entry_value\n return self.items",
"def get_inputs(self):\n return self.inputs",
"def get_inputs(self) -> List[NodeValue]:\n\n return self.inputs_",
"def hidden_node_ids(self):\n return [i for i in range(self.n_inputs, self.n_inputs + self.n_hidden)]",
"def _get_inputs(self):\n return self.__inputs",
"def _get_inputs(self):\n return self.__inputs",
"def _get_inputs(self):\n return self.__inputs",
"def inputs(self):\n return self.inputs",
"def get_main_inputs(self, input) -> dict:\n values = {}\n for element in input.parameters:\n try:\n name = input.name + element['name']\n value = getattr(self, name).text().replace(\",\", \".\")\n parse_value = element['parse_function'](value) * element['to_SI']\n if parse_value < 0: raise Exception('Hodnota {} je mensi nez nula. Zadejte kladne cislo.'.format(name))\n values[element['name']] = parse_value\n except ValueError:\n self.show_error_dialog_to_user('Nezadali jste cislo u hodnoty {}!'.format(name))\n except Exception as error:\n self.show_error_dialog_to_user(error.args[0])\n\n return values",
"def asdict(self) -> dict[str, Any]:\n return {\n w.name: getattr(w, \"value\", None)\n for w in self._list\n if w.name and not w.gui_only\n }",
"def get_inputs(labels):\r\n str_inputs = []\r\n for label_item in labels:\r\n if label_item != \"id\":\r\n item = get_input(label_item)\r\n str_inputs.append(item)\r\n else:\r\n str_inputs.append(\"PLACEHOLDER\")\r\n return str_inputs",
"def get_inputs(self):\n inputs = set()\n # simply combine the inputs for the interaction\n inputs.update(self.get_interaction().get_inputs())\n return inputs",
"def get_inputs(list_labels, title):\n\n # your code\n print(title)\n\n inputs = []\n for i in list_labels:\n inputs.append(input(i))\n\n return inputs",
"def get_inputs(list_labels, title):\n inputs = []\n\n for labels in range(len(list_labels)):\n user_input = input(list_labels[labels])\n inputs.append(user_input)\n\n return inputs",
"def _get_input_dict(input_ids: List[Tensor], attention_mask: List[Tensor]) ->Dict[str, Tensor]:\n output_dict = {'input_ids': torch.cat(input_ids), 'attention_mask': torch.cat(attention_mask)}\n return output_dict",
"def inputs(self):\n return self._inputs",
"def inputs(self):\n return self._inputs",
"def inputs(self):\n return self._inputs",
"def items(self):\r\n return self.elements.values()",
"def ew(node):\n cY = {}\n gM = node.dependent(b.INPUTS | b.HIDDEN_INPUTS)\n for p in gM:\n cY[p] = []\n for c in range(p.inputs()):\n if p.input(c) == node:\n cY[p].append(c)\n\n return cY"
]
| [
"0.71392256",
"0.655237",
"0.624078",
"0.61002916",
"0.60492",
"0.5750601",
"0.57237935",
"0.5712929",
"0.5706916",
"0.5644363",
"0.56369454",
"0.56110394",
"0.5592949",
"0.5583895",
"0.55714357",
"0.55714357",
"0.55714357",
"0.5556644",
"0.5502055",
"0.54522103",
"0.53868526",
"0.5368069",
"0.5350839",
"0.5345055",
"0.53311515",
"0.53234965",
"0.53234965",
"0.53234965",
"0.53146505",
"0.52378744"
]
| 0.8503397 | 0 |
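A small self-contained check of the hidden-input parsing shown above, assuming BeautifulSoup is installed; the HTML fragment is invented for illustration.

from bs4 import BeautifulSoup

html = """
<form>
  <input type="hidden" name="csrf" value="abc123">
  <input type="hidden" name="page">
  <input type="text" name="user" value="ignored">
</form>
"""

data = {}
for element in BeautifulSoup(html, 'html.parser').select('form'):
    for hidden in element.select('input[type=hidden]'):
        # Hidden inputs without a value attribute fall back to an empty string
        data[hidden.attrs['name']] = hidden.attrs.get('value', '')

print(data)  # {'csrf': 'abc123', 'page': ''}
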
Normalize the bank balance string (decimal comma, space separators) and return it as a float. | def _fix_balance(self, balance):
return float(balance.replace(',', '.').replace(' ', '')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_balance():\n return float(pareto.rvs(1.161))",
"def balance(self) -> float:\n\t\tbalance = 0\n\t\tfor transaction in self.transactions:\n\t\t\tsign = 1 if transaction.receiving_account == self.__number else -1\n\t\t\tbalance += sign*transaction.usd*transaction.completed\n\t\t# The bank has infinite money\n\t\tif self.name == Account.BANK:\n\t\t\tbalance = Decimal('Infinity')\n\t\treturn balance",
"def __get__(self) -> float:\n\n return float(self.balance)",
"def __balance__(self) -> float:\n\n with dataset.connect(database.get_db()) as db:\n # Find last bank transaction.\n statement = statement = f\"\"\"\n SELECT opening_balance, transaction_amount\n FROM bank\n WHERE author_id = {self.user.id}\n ORDER BY id DESC\n LIMIT 1\n \"\"\"\n result = db.query(statement)\n\n for row in result:\n balance = row[\"opening_balance\"] + row[\"transaction_amount\"]\n break\n else:\n # If there was no result for the user, default balance is given.\n balance = 500\n\n return float(balance)",
"def get_balance(self) -> float:\n return self._balance",
"def getBalance(self):\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n sql_command = \"\"\"select amount from accounts where name=?;\"\"\"\n\n cursor.execute(sql_command, (self.name, ))\n\n return round(float(re.sub(r'[\\(\\),]', '', str(cursor.fetchone()))), 2)",
"def balance(self) -> float:\n return self.position.exchange.wallet_balance",
"def sanitize_balance(balance: str) -> Union[int, float]:\n if balance.lower() in [\"unlimited\", \"n/a\"]:\n return -1\n # Take the string and convert it to a numeric type.\n to_number = float(balance.replace(\",\", \"\"))\n # Only return a float if we need decimal precision.\n return to_number if to_number % 1 else int(to_number)",
"def stripe_amount(self):\n if self.currency.code in (\n 'BIF', 'XAF', 'XPF', 'CLP',\n 'KMF', 'DJF', 'GNF', 'JPY',\n 'MGA', 'PYG', 'RWF', 'KRW',\n 'VUV', 'VND', 'XOF'):\n return int(self.amount)\n return int(self.amount * 100)",
"def getBudgetBalance(self, budgetName):\r\n assert budgetName in self.budgets, \"Specified budget doesn't exist\"\r\n return \"%.2f\" % float(self.budgets[budgetName])",
"def get_wallet_balance():\n try:\n if CONF.exchange == 'bitmex':\n return EXCHANGE.fetch_balance()['info'][0]['walletBalance'] * CONF.satoshi_factor\n if CONF.exchange == 'kraken':\n asset = CONF.base if CONF.base != 'BTC' else 'XBt'\n return float(EXCHANGE.private_post_tradebalance({'asset': asset})['result']['tb'])\n if CONF.exchange == 'liquid':\n result = EXCHANGE.private_get_accounts_balance()\n if result is not None:\n for bal in result:\n if bal['currency'] == CONF.base:\n return float(bal['balance'])\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_wallet_balance()",
"def budget_balance(self):\n budget_balance = round(self.budget() - self.total_spent(), 2)\n budget_balance_degree = round( (9000 * self.total_spent()) / (self.budget()), 4) #convert to degrees and round to four decimal places\n return (budget_balance, budget_balance_degree)",
"def balance(self):\n return self._rbal - self._lbal",
"def balance(self) -> Decimal:\n withdrawals = self.withdrawal_requests.filter(\n status=WithdrawalStatus.open,\n )\n if len(withdrawals) == 0:\n return self.internal_balance\n else:\n withdrawal_total = sum(map(lambda w: w.amount, withdrawals))\n return self.internal_balance - withdrawal_total",
"def get_balance(card):\n data = {\n \"Card.Number\": card[0],\n \"Card.Pin\": card[1],\n }\n\n response = requests.post(BALANCE_URL, data=data, headers=HEADERS)\n if response.status_code == 200:\n match = BALANCE_RE.search(response.text)\n if match:\n return float(match.group(1))",
"def get_transfer_fee(value: float) -> float:\n return (value * (0.99 / 100)) + 4.9",
"def get_balance(self):\n final_amount = 0\n for i in range(len(self.ledger)):\n final_amount += self.ledger[i]['amount']\n return final_amount",
"def get_account_balance(self):\n return int(self.request('get', 'fort/accounts')['balance'])",
"def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)",
"def get_balance(self):\r\n return self.balance",
"def balance(self):\n url = self.base_url + 'account/balance'\n self.session.headers.update(self.sign(url))\n resp = self.session.get(url)\n try:\n data = resp.json()\n data['amount'] = float(data['amount'])\n return pd.Series(data)\n except:\n return resp",
"def balance(self, acct_id):\n acct = self.data.get(acct_id)\n if acct:\n bal = float(acct[\"due\"]) - float(acct[\"paid\"])\n return f\"${bal:.2f}\"\n return None",
"def from_float(amount : float):\n rao = int(amount * pow(10, 9))\n return Balance(rao)",
"def balance(self):\n return self._balance",
"def get_fee(self):\n fee = round(self.order_payment.amount * Decimal(0.015), 2)\n return fee",
"def get_account_balance(account):\n balance = 0\n\n for address in get_addresses_by_account(account):\n balance += get_address_balance(address)\n\n return float(balance)",
"def get_balance(self):\n return self.balance",
"def get_balance(self):\n return self.balance",
"def balance(self) -> int:\r\n if self._top == None:\r\n return 0\r\n return self._top.balance()",
"def get_fiat_balance():\n return get_balance(CONF.quote)"
]
| [
"0.72123927",
"0.7061845",
"0.7009867",
"0.6872777",
"0.67247254",
"0.64945155",
"0.64721006",
"0.64484996",
"0.6287387",
"0.6281066",
"0.62781847",
"0.62356645",
"0.6213261",
"0.61990225",
"0.6180062",
"0.6111443",
"0.61091805",
"0.6072259",
"0.6057195",
"0.6021365",
"0.6020943",
"0.6010339",
"0.6007213",
"0.599562",
"0.599236",
"0.5984826",
"0.5981688",
"0.5981688",
"0.5943686",
"0.5932958"
]
| 0.7926201 | 0 |
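A worked example of the balance normalization above, assuming amounts are formatted with a decimal comma and spaces as thousand separators.

def fix_balance(balance: str) -> float:
    # "12 345,67" -> "12345.67" -> 12345.67
    return float(balance.replace(',', '.').replace(' ', ''))

assert fix_balance('12 345,67') == 12345.67
assert fix_balance('-1 500,00') == -1500.0
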
Parse the token from body. | def _parse_token(self, body):
        token_match = re.search(r'var\s*token\s*=[\s\']*(\d+)', body)
return int(token_match.group(1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_tokens(self, body):\n\n old_token = self.token\n old_json_token = self.json_token\n\n self.token = self._parse_token(body)\n self.json_token = self._parse_json_token(body)\n\n logger.debug('Token set to: %s (Old: %s)', self.token, old_token)\n logger.debug('JSON token set to: %s (Old: %s)', self.json_token,\n old_json_token)",
"def _parse_json_token(self, body):\n\n token_match = re.search('var\\s*jsonToken\\s*=[\\s\\']*([\\w-]+)', body)\n return token_match.group(1)",
"def parse(self, tokenizer):\n pass",
"def parse(token):\n\n pass",
"def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token",
"def _parse_token(self, response=None):\n token_url = 'https://tinychat.com/start?#signin'\n if response is None:\n response = util.web.http_get(url=token_url, referer=token_url, proxy=self._proxy)\n\n if response is not None and response['content'] is not None:\n soup = BeautifulSoup(response['content'], 'html.parser')\n\n token = soup.find(attrs={'name': 'csrf-token'})\n self._token = token['content']",
"def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])",
"def parse_token(req):\n auth_string_list = req.headers.get('Authorization').split()\n # Check in correct format i.e. Bearer: 39xds03lda0...\n if len(auth_string_list) == 1:\n raise ValueError('Authorization has invalid format')\n else:\n token = auth_string_list[1]\n data = jwt.decode(token, config.SECRET_KEY, algorithms='HS256')\n return data",
"def read_token(self):\n self._skip_white_space()\n return self._get_token()",
"async def process(self, tokens):\n return await self.parser.process(tokens)",
"def parse_contents(self):\n self.parsed_contents = tokenize(self.contents)[0]",
"def parse_token_result(self, res: dict, what: str) -> None:\n if 'error' in res:\n message: str = '{}: {}'.format(what, res['error'].get('message'))\n code: int = int(res['error'].get('code'))\n\n if code == 401:\n raise TokenExpiredError(message, code)\n else:\n raise AuthenticationTokenError(message, code)\n\n self.token = res.get('_TOKEN')\n\n expires_at = res.get('expires-at')\n if expires_at:\n self.expires_at = int(expires_at)\n else:\n expires_in = res.get('expires_in')\n if expires_in:\n self.expires_at = self.get_epoch_millis() + int(expires_in) * 1000\n\n refresh_token = res.get('refresh_token')\n if refresh_token:\n self.refresh_token = refresh_token\n\n self.last_update = self.get_epoch_millis()",
"def parse_token(self, token, tags=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n p = { WORD: \"\", \n POS: None, \n IOB: None,\n CHUNK: None,\n PNP: None,\n REL: None,\n ROLE: None,\n ANCHOR: None,\n LEMMA: None }\n # Split the slash-formatted token into separate tags in the given order.\n # Decode &slash; characters (usually in words and lemmata).\n # Assume None for missing tags (except the word itself, which defaults to an empty string).\n custom = {}\n for k, v in izip(tags, token.split(\"/\")):\n if SLASH0 in v:\n v = v.replace(SLASH, \"/\")\n if k not in p:\n custom[k] = None\n if v != OUTSIDE or k == WORD or k == LEMMA: # \"type O negative\" => \"O\" != OUTSIDE.\n (p if k not in custom else custom)[k] = v\n # Split IOB-prefix from the chunk tag:\n # B- marks the start of a new chunk, \n # I- marks inside of a chunk.\n ch = p[CHUNK]\n if ch is not None and ch.startswith((\"B-\", \"I-\")):\n p[IOB], p[CHUNK] = ch[:1], ch[2:] # B-NP\n # Split the role from the relation:\n # NP-SBJ-1 => relation id is 1 and role is SBJ, \n # VP-1 => relation id is 1 with no role.\n # Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3).\n if p[REL] is not None:\n ch, p[REL], p[ROLE] = self._parse_relation(p[REL])\n # Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP).\n # For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP.\n if ch == \"PP\" \\\n and self._previous \\\n and self._previous[REL] == p[REL] \\\n and self._previous[ROLE] == p[ROLE]: \n ch = \"NP\"\n if p[CHUNK] is None and ch != OUTSIDE:\n p[CHUNK] = ch\n self._previous = p\n # Return the tags in the right order for Sentence.append().\n return p[WORD], p[LEMMA], p[POS], p[CHUNK], p[ROLE], p[REL], p[PNP], p[ANCHOR], p[IOB], custom",
"def parse(self, content):\n pass",
"def _upgrade_token(self, http_body):\n self.token_string = auth_sub_string_from_body(http_body)",
"def tokenize_body(self, msg):\n\n if options[\"Tokenizer\", \"check_octets\"]:\n # Find, decode application/octet-stream parts of the body,\n # tokenizing the first few characters of each chunk.\n for part in octetparts(msg):\n try:\n text = part.get_payload(decode=True)\n except:\n yield \"control: couldn't decode octet\"\n text = part.get_payload(decode=False)\n\n if text is None:\n yield \"control: octet payload is None\"\n continue\n\n yield \"octet:%s\" % text[:options[\"Tokenizer\",\n \"octet_prefix_size\"]]\n\n parts = imageparts(msg)\n if options[\"Tokenizer\", \"image_size\"]:\n # Find image/* parts of the body, calculating the log(size) of\n # each image.\n\n total_len = 0\n for part in parts:\n try:\n text = part.get_payload(decode=True)\n except:\n yield \"control: couldn't decode image\"\n text = part.get_payload(decode=False)\n\n total_len += len(text or \"\")\n if text is None:\n yield \"control: image payload is None\"\n\n if total_len:\n yield \"image-size:2**%d\" % round(log2(total_len))\n\n if options[\"Tokenizer\", \"crack_images\"]:\n engine_name = options[\"Tokenizer\", 'ocr_engine']\n from spambayes.ImageStripper import crack_images\n text, tokens = crack_images(engine_name, parts)\n for t in tokens:\n yield t\n for t in self.tokenize_text(text):\n yield t\n\n # Find, decode (base64, qp), and tokenize textual parts of the body.\n for part in textparts(msg):\n # Decode, or take it as-is if decoding fails.\n try:\n text = part.get_payload(decode=True)\n except:\n yield \"control: couldn't decode\"\n text = part.get_payload(decode=False)\n if text is not None:\n text = try_to_repair_damaged_base64(text)\n\n if text is None:\n yield 'control: payload is None'\n continue\n\n # Replace numeric character entities (like a for the letter\n # 'a').\n text = numeric_entity_re.sub(numeric_entity_replacer, text)\n\n # Normalize case.\n text = text.lower()\n\n if options[\"Tokenizer\", \"replace_nonascii_chars\"]:\n # Replace high-bit chars and control chars with '?'.\n text = text.translate(non_ascii_translate_tab)\n\n for t in find_html_virus_clues(text):\n yield \"virus:%s\" % t\n\n # Get rid of uuencoded sections, embedded URLs, <style gimmicks,\n # and HTML comments.\n for cracker in (crack_uuencode,\n crack_urls,\n crack_html_style,\n crack_html_comment,\n crack_noframes):\n text, tokens = cracker(text)\n for t in tokens:\n yield t\n\n # Remove HTML/XML tags. Also . <br> and <p> tags should\n # create a space too.\n text = breaking_entity_re.sub(' ', text)\n # It's important to eliminate HTML tags rather than, e.g.,\n # replace them with a blank (as this code used to do), else\n # simple tricks like\n # Wr<!$FS|i|R3$s80sA >inkle Reduc<!$FS|i|R3$s80sA >tion\n # can be used to disguise words. <br> and <p> were special-\n # cased just above (because browsers break text on those,\n # they can't be used to hide words effectively).\n text = html_re.sub('', text)\n\n for t in self.tokenize_text(text):\n yield t",
"def _parse(tokens: Deque[Token]) -> object:\n token = tokens.popleft()\n\n if token.type == 'left_bracket':\n return parse_array(tokens)\n\n if token.type == 'left_brace':\n return parse_object(tokens)\n\n if token.type == 'string':\n return parse_string(token)\n\n if token.type == 'number':\n return parse_number(token)\n\n special_tokens = {\n 'true': True,\n 'false': False,\n 'null': None,\n }\n if token.type in ('boolean', 'null'):\n return special_tokens[token.value]\n\n raise ParseError(\n f\"Unexpected token: {token.value} \"\n f\"(line {token.line} column {token.column})\")",
"def auth_sub_string_from_body(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Token='):\n # Strip off Token= and return the token value string.\n return response_line[6:]\n return None",
"def __parse(self, ch, method, properties, body: bytes):\n input_msg = body.decode()\n request = json.loads(input_msg)\n answer_msg = {}\n err_code = 0\n err_msg = \"\"\n req_id = request.get(\"id\")\n try:\n tag = request[\"tag\"]\n if tag == \"who_are_you\":\n answer_msg = self.whoami()\n elif tag == \"task\":\n answer_msg = self.generate()\n else:\n err_code = -2\n err_msg = \"Unexpected param\"\n except KeyError:\n err_code = -1\n err_msg = \"Error request parsing\"\n finally:\n self.__answer(json.dumps({\"req_id\": req_id,\n \"data\": answer_msg,\n \"err\": {\"code\": err_code,\n \"msg\": err_msg}}))",
"def smart_parse(body):\n try:\n data_dict = json.loads(body)\n except ValueError:\n return form_urlencoded_parse(body)\n return data_dict",
"def parse(self):\n\t\tsub = self.body.split(' ')\n\t\tif len(sub) == 3:\n\t\t\tself.latitude = float(sub[1])\n\t\t\tself.longitude = float(sub[2])\n\t\telse:\n\t\t\tself.latitude = None\n\t\t\tself.longitude = None\n\t\t\traise Exception(\"Invalid message\")",
"def read_token(stream):\n strip_whitespace(stream)\n\n if stream.eof():\n raise VeryUnexpectedEndException(stream, \"Encountered EOF while scanning for token\")\n\n pos = stream.pos()\n while not stream.eof() and stream.peek() in VALID_TOKEN_CHARS:\n stream.consume()\n\n return stream.slice(pos)",
"def parse_token(page_html):\n offset = 7\n token = page_html.find(\"token\")\n start_pos = (page_html[token:]).find('value=\"') + token\n end_pos = (page_html[start_pos + offset:]).find('\"') + start_pos + offset\n\n return page_html[start_pos + offset:end_pos]",
"def parse_markdown(tokens):\r\n body = Body_Parser(tokens)\r\n if body.consumed != -1 + tokens.length():\r\n if not tokens.grab(body.consumed-1).context == \"EOF\":\r\n list = tokens.grab_num(body.consumed-3, 5)\r\n context = \"\"\r\n for i in list:\r\n context += i.context + \"\\n\"\r\n click.secho(\r\n \"error at %s\\n%s\" % (tokens.grab(body.consumed-1).at, context), fg=\"red\",\r\n err=True)\r\n return body",
"def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)",
"async def validate_token(self, token):",
"def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks",
"def _parse_token(token: str):\r\n if token in OPERATOR_TOKENS:\r\n return Operator(token)\r\n if token.isdigit():\r\n return Number(int(token))\r\n if \".\" in token:\r\n if token.count(\".\") > 1 or token[-1] == '.':\r\n raise BadNumber(token)\r\n return Number(float(token))\r\n if token == \"i\":\r\n return ComplexNumber(0, 1)\r\n if token.isalpha():\r\n return Variable(token)\r\n raise UnknownToken(token)",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token):\n self.token = token"
]
| [
"0.7431582",
"0.7366899",
"0.6743963",
"0.6730944",
"0.66467994",
"0.6132901",
"0.58613884",
"0.5779626",
"0.57630026",
"0.5718362",
"0.57095325",
"0.5676709",
"0.564161",
"0.56361425",
"0.5579346",
"0.5576902",
"0.5567636",
"0.5556165",
"0.5551522",
"0.5546756",
"0.55331093",
"0.54325",
"0.5426524",
"0.542548",
"0.5424067",
"0.5406913",
"0.53864604",
"0.53673434",
"0.53478473",
"0.53478473"
]
| 0.7520695 | 0 |
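For illustration, the token regex above applied to an invented response body (the real page markup is not shown here).

import re

body = "<script>var token = '1234567';</script>"  # invented sample body

match = re.search(r'var\s*token\s*=[\s\']*(\d+)', body)
token = int(match.group(1))
assert token == 1234567
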
Parse the JSON token from body. | def _parse_json_token(self, body):
        token_match = re.search(r'var\s*jsonToken\s*=[\s\']*([\w-]+)', body)
return token_match.group(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_tokens(self, body):\n\n old_token = self.token\n old_json_token = self.json_token\n\n self.token = self._parse_token(body)\n self.json_token = self._parse_json_token(body)\n\n logger.debug('Token set to: %s (Old: %s)', self.token, old_token)\n logger.debug('JSON token set to: %s (Old: %s)', self.json_token,\n old_json_token)",
"def _parse_token(self, body):\n\n token_match = re.search('var\\s*token\\s*=[\\s\\']*(\\d+)', body)\n return int(token_match.group(1))",
"def smart_parse(body):\n try:\n data_dict = json.loads(body)\n except ValueError:\n return form_urlencoded_parse(body)\n return data_dict",
"def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])",
"def parse(content):\n return json.loads(content)",
"def ingest_json_body(request):\n # log.debug(request.body)\n try:\n data = json.loads(str(request.body, encoding='utf-8'))\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data",
"def parse_value(tokens: deque) -> JSON:\n tk = tokens[0]\n\n if tk == \"[\":\n return parse_list(tokens)\n elif tk.type == \"NUMBER\":\n tokens.popleft() # É necessário consumir o 1o token\n return float(tk)\n \n # Complete com as outras regras de objeto, STRING, BOOL e NULL\n # ...\n else:\n raise SyntaxError(\"token inesperada em lista: %r\" % tk)",
"def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass",
"def parse_token(req):\n auth_string_list = req.headers.get('Authorization').split()\n # Check in correct format i.e. Bearer: 39xds03lda0...\n if len(auth_string_list) == 1:\n raise ValueError('Authorization has invalid format')\n else:\n token = auth_string_list[1]\n data = jwt.decode(token, config.SECRET_KEY, algorithms='HS256')\n return data",
"def _parse_json(req, resp):\n try:\n body = req.stream.read()\n return json.loads(body)\n except ValueError as e:\n err_msg = str(e) + ': ' + body\n resp.status = falcon.HTTP_400\n resp.body = make_error_body(err_msg)\n return",
"def parse(token):\n\n pass",
"def parse_body(self, body):\n\n try:\n json = simplejson.loads(body)\n except ValueError:\n raise ParseError()\n\n messages = []\n if isinstance(json, (list, tuple)):\n if len(json) == 0:\n raise InvalidRequestError('Recieved an empty batch message')\n batch_request = True\n for obj in json:\n msg = JsonRpcMessage(obj)\n messages.append(msg)\n\n if isinstance(json, (dict)):\n batch_request = False\n msg = JsonRpcMessage(json)\n messages.append(msg)\n return messages, batch_request",
"def parse_json(response):\r\n return json.loads(response.content)",
"def parse(message):\n try:\n return json.loads(message)\n except TypeError:\n print(\"Ignoring message because it did not contain valid JSON.\")",
"def get_token_from_json(json):\r\n return PodiumToken(json[\"access_token\"], json[\"token_type\"], json[\"created_at\"])",
"def parse(self, tokenizer):\n pass",
"def parse(json_string: str) -> object:\n tokens = tokenize(json_string)\n\n value = _parse(tokens)\n if len(tokens) != 0:\n raise ParseError(\n f\"Invalid JSON at {tokens[0].value} \"\n f\"(line {tokens[0].line} column {tokens[0].column})\")\n\n return value",
"def _parsejson(x):\n return json.loads(x.read().decode('utf-8'))",
"def parse_response(self, response):\n\n return json.loads(response.text)",
"def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n data = stream.read().decode(encoding)\n return json.loads(data)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % six.text_type(exc))",
"def parse_json(data):\n return json.loads(data)",
"def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token",
"def parse(self, stream, media_type=None, parser_context=None):\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n\n try:\n decoded_stream = codecs.getreader(encoding)(stream)\n parse_constant = strict_constant if self.strict else None\n return ujson.load(decoded_stream, parse_constant=parse_constant)\n except ValueError as exc:\n raise ParseError('JSON parse error - %s' % str(exc))",
"def json(self):\n return json.loads(self.text)",
"def _parse_request(self):\n if len(self.request.body) > 0:\n try:\n return tornado.escape.json_decode(self.request.body)\n except Exception:\n #Not Json, Using Form data\n return self.request.arguments\n else:\n return self.request.arguments",
"def form_urlencoded_parse(body):\n try:\n data = urlparse.parse_qs(body, strict_parsing=True)\n for key in data:\n data[key] = data[key][0]\n return data\n except ValueError:\n raise InvalidJSON()",
"def deserialize(token):\n\n if token.type == TYPE_BOOLEAN:\n return _to_boolean(token)\n elif token.type == TYPE_INTEGER:\n return _to_int(token)\n elif token.type == TYPE_FLOAT:\n return _to_float(token)\n elif token.type == TYPE_DATE:\n return _to_date(token)\n elif token.type in (TYPE_STRING, TYPE_MULTILINE_STRING, TYPE_BARE_STRING,\n TYPE_LITERAL_STRING, TYPE_MULTILINE_LITERAL_STRING):\n return _to_string(token)\n else:\n raise Exception('This should never happen!')",
"def load_json_body(data):\n # type: (str) -> Union[Dict, List]\n try:\n return json.loads(data)\n except Exception:\n raise HttpQueryError(400, \"POST body sent invalid JSON.\")",
"def parse_json(self, json_cfg: Dict) -> Any:\n raise NotImplementedError",
"def parse(self, payload):\n payload = json.loads(payload)\n \n if payload['response'] in self.possible_responses:\n return self.possible_responses[payload['response']](payload)\n else:\n print 'Response not valid'"
]
| [
"0.7015168",
"0.6799475",
"0.6467686",
"0.60479337",
"0.6021648",
"0.5911621",
"0.5891923",
"0.5796345",
"0.5792454",
"0.5727298",
"0.572379",
"0.56894934",
"0.5660755",
"0.562958",
"0.5594681",
"0.5550532",
"0.55383945",
"0.5516941",
"0.5512666",
"0.54930973",
"0.548546",
"0.5477407",
"0.54663205",
"0.5459137",
"0.5418647",
"0.53980285",
"0.53949046",
"0.5394573",
"0.537382",
"0.5368041"
]
| 0.8396241 | 0 |
Parse and save tokens from body. | def _parse_tokens(self, body):
old_token = self.token
old_json_token = self.json_token
self.token = self._parse_token(body)
self.json_token = self._parse_json_token(body)
logger.debug('Token set to: %s (Old: %s)', self.token, old_token)
logger.debug('JSON token set to: %s (Old: %s)', self.json_token,
old_json_token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, tokenizer):\n pass",
"async def process(self, tokens):\n return await self.parser.process(tokens)",
"def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)",
"def _parse_token(self, body):\n\n token_match = re.search('var\\s*token\\s*=[\\s\\']*(\\d+)', body)\n return int(token_match.group(1))",
"def _parse_json_token(self, body):\n\n token_match = re.search('var\\s*jsonToken\\s*=[\\s\\']*([\\w-]+)', body)\n return token_match.group(1)",
"def tokenize_body(self, msg):\n\n if options[\"Tokenizer\", \"check_octets\"]:\n # Find, decode application/octet-stream parts of the body,\n # tokenizing the first few characters of each chunk.\n for part in octetparts(msg):\n try:\n text = part.get_payload(decode=True)\n except:\n yield \"control: couldn't decode octet\"\n text = part.get_payload(decode=False)\n\n if text is None:\n yield \"control: octet payload is None\"\n continue\n\n yield \"octet:%s\" % text[:options[\"Tokenizer\",\n \"octet_prefix_size\"]]\n\n parts = imageparts(msg)\n if options[\"Tokenizer\", \"image_size\"]:\n # Find image/* parts of the body, calculating the log(size) of\n # each image.\n\n total_len = 0\n for part in parts:\n try:\n text = part.get_payload(decode=True)\n except:\n yield \"control: couldn't decode image\"\n text = part.get_payload(decode=False)\n\n total_len += len(text or \"\")\n if text is None:\n yield \"control: image payload is None\"\n\n if total_len:\n yield \"image-size:2**%d\" % round(log2(total_len))\n\n if options[\"Tokenizer\", \"crack_images\"]:\n engine_name = options[\"Tokenizer\", 'ocr_engine']\n from spambayes.ImageStripper import crack_images\n text, tokens = crack_images(engine_name, parts)\n for t in tokens:\n yield t\n for t in self.tokenize_text(text):\n yield t\n\n # Find, decode (base64, qp), and tokenize textual parts of the body.\n for part in textparts(msg):\n # Decode, or take it as-is if decoding fails.\n try:\n text = part.get_payload(decode=True)\n except:\n yield \"control: couldn't decode\"\n text = part.get_payload(decode=False)\n if text is not None:\n text = try_to_repair_damaged_base64(text)\n\n if text is None:\n yield 'control: payload is None'\n continue\n\n # Replace numeric character entities (like a for the letter\n # 'a').\n text = numeric_entity_re.sub(numeric_entity_replacer, text)\n\n # Normalize case.\n text = text.lower()\n\n if options[\"Tokenizer\", \"replace_nonascii_chars\"]:\n # Replace high-bit chars and control chars with '?'.\n text = text.translate(non_ascii_translate_tab)\n\n for t in find_html_virus_clues(text):\n yield \"virus:%s\" % t\n\n # Get rid of uuencoded sections, embedded URLs, <style gimmicks,\n # and HTML comments.\n for cracker in (crack_uuencode,\n crack_urls,\n crack_html_style,\n crack_html_comment,\n crack_noframes):\n text, tokens = cracker(text)\n for t in tokens:\n yield t\n\n # Remove HTML/XML tags. Also . <br> and <p> tags should\n # create a space too.\n text = breaking_entity_re.sub(' ', text)\n # It's important to eliminate HTML tags rather than, e.g.,\n # replace them with a blank (as this code used to do), else\n # simple tricks like\n # Wr<!$FS|i|R3$s80sA >inkle Reduc<!$FS|i|R3$s80sA >tion\n # can be used to disguise words. <br> and <p> were special-\n # cased just above (because browsers break text on those,\n # they can't be used to hide words effectively).\n text = html_re.sub('', text)\n\n for t in self.tokenize_text(text):\n yield t",
"def parse_contents(self):\n self.parsed_contents = tokenize(self.contents)[0]",
"def parse_body(body):\n for line in body.lower().split(\"\\n\"):\n words = line.split()\n try:\n idx = words.index(\"re-run\")\n except ValueError:\n continue\n if words[idx + 1] == \"full\":\n yield words[idx : idx + 3]\n else:\n yield words[idx : idx + 2]",
"def parse(token):\n\n pass",
"def parse(self, tokens: List[str]) -> List:\r\n self._check_brackets(tokens)\r\n\r\n objs = self._parse_individual_tokens(tokens)\r\n objs = self._parse_functions(objs)\r\n objs = self._parse_matrices(objs)\r\n\r\n return objs",
"def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token",
"def _parse_individual_tokens(self, tokens: List[str]) -> List:\r\n objs = []\r\n\r\n for token in tokens:\r\n obj = self._parse_token(token)\r\n objs.append(obj)\r\n\r\n return objs",
"def _upgrade_token(self, http_body):\n self.token_string = auth_sub_string_from_body(http_body)",
"def smart_parse(body):\n try:\n data_dict = json.loads(body)\n except ValueError:\n return form_urlencoded_parse(body)\n return data_dict",
"def _preprocess(self, body):\n return body",
"def _next_tokens(self, head):\n state = head.state\n input_str = self.input_str\n position = head.position\n actions = state.actions\n in_len = len(input_str)\n tokens = []\n\n # add special STOP token if they are applicable\n if STOP in actions:\n if not self.consume_input \\\n or (self.consume_input and position == in_len):\n tokens.append(STOP_token)\n\n if position < in_len:\n # Get tokens by trying recognizers - but only if we are not at\n # the end, because token cannot be empty\n if self.custom_token_recognition:\n def get_tokens():\n return self._token_recognition(head)\n\n custom_tokens = self.custom_token_recognition(\n head, get_tokens,\n )\n if custom_tokens is not None:\n tokens.extend(custom_tokens)\n else:\n tokens.extend(self._token_recognition(head))\n\n # do lexical disambiguation if it is enabled\n if self.lexical_disambiguation:\n tokens = self._lexical_disambiguation(tokens)\n\n return tokens",
"def deserialize_tokens():\n\ttry:\n\t\twith open(config.TOKENPATH, \"r+\") as f:\n\t\t\tcontext = f.read()\n\t\t\tres = eval(context)\n\t\t\t# load into memory\n\t\t\treturn res[\"access_token\"], res[\"refresh_token\"]\n\texcept:\n\t\t# unexcept token format\n\t\tfrom common import ApplicationException\n\t\traise ApplicationException(\"authorization file is broken, please run init\")",
"def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks",
"def parse_markdown(tokens):\r\n body = Body_Parser(tokens)\r\n if body.consumed != -1 + tokens.length():\r\n if not tokens.grab(body.consumed-1).context == \"EOF\":\r\n list = tokens.grab_num(body.consumed-3, 5)\r\n context = \"\"\r\n for i in list:\r\n context += i.context + \"\\n\"\r\n click.secho(\r\n \"error at %s\\n%s\" % (tokens.grab(body.consumed-1).at, context), fg=\"red\",\r\n err=True)\r\n return body",
"def parse_post_values(self): \n self.parse_values(sys.stdin.read())",
"def __init__(self):\n self.tokens = []",
"def parse(self, tokens):\n self.tokens = tokens\n self.tokens.append(END())\n t = self.e()\n self.expect(END)\n return t",
"def parse(self, content):\n pass",
"def tokenize(self, file_name):\n main_body = self.cast.nodes[0].body[-1]\n token_string = self.visit(main_body)\n\n variable_map = self.dump_var_map()\n value_map = self.dump_val_map()\n\n out_file = open(file_name, \"w\")\n out_file.write(f\"{token_string}\\n\")\n\n for var in variable_map:\n out_file.write(f\"{var}\\n\")\n\n for val in value_map:\n out_file.write(f\"{val}\\n\")",
"def build_tokens(self):\n self.advance()\n while self.__token != \"\":\n self.__tokens.append(self.token_type())\n self.advance()",
"def update_body(self, body: dict[Any, Any]) -> None:\n body[\"data\"][\"AUTHENTICATOR\"] = ID_TOKEN_AUTHENTICATOR\n body[\"data\"][\"TOKEN\"] = self._id_token",
"def process_body(nlp, body: str, replacements=None, require_labels=False):\n\n if replacements is None:\n replacements = []\n\n entry = {\"ents\": [],\n \"cats\": [],\n \"replacements\": [],\n \"text\": None,\n \"docstrings\": []}\n\n body_ = body.lstrip()\n initial_strip = body[:len(body) - len(body_)]\n\n replacements = correct_entities(replacements, [(0, len(initial_strip))])\n\n docsting_offsets = get_docstring(body_)\n\n body_, replacements, docstrings = remove_offsets(body_, replacements, docsting_offsets)\n entry['docstrings'].extend(docstrings)\n\n was_valid = body_valid(body_)\n initial_labels = get_initial_labels(body_)\n\n if require_labels and initial_labels is None:\n return None\n\n returns, return_cuts = unpack_returns(body_, initial_labels)\n annotations, annotation_cuts = unpack_annotations(body_, initial_labels)\n\n body_, replacements_annotations, _ = remove_offsets(body_, replacements + annotations,\n return_cuts + annotation_cuts)\n is_valid = body_valid(body_)\n if was_valid != is_valid:\n print(\"Failed processing\")\n return None\n # raise Exception()\n\n replacements_annotations = adjust_offsets2(replacements_annotations, len(initial_strip))\n body_ = initial_strip + body_\n\n entry['replacements'].extend(list(filter(lambda x: isint(x[2]), replacements_annotations)))\n entry['ents'].extend(list(filter(lambda x: not isint(x[2]), replacements_annotations)))\n entry['cats'].extend(returns)\n entry['text'] = body_\n\n entry['replacements'] = resolve_self_collisions2(entry['replacements'])\n\n # assert isvalid(nlp, body_, entry['replacements'])\n assert isvalid(nlp, body_, entry['ents'])\n\n return entry",
"def parse_tokens(self, tokens, debug=0):\n self.reset()\n self.debug = debug\n for position, token in enumerate(tokens):\n if self.stem:\n token = self.stemmer.stem(token)\n if not isinstance(token, basestring):\n raise TypeError(\n 'Only string tokens are allowed; %s is not a string.' % (token,))\n self.reference(token, self.position, self.position, 0.0)\n preparse = self.check_preparsers(token)\n if preparse:\n self.reference(preparse, self.position, self.position, 0.0)\n self.position = position + 1\n return self.complete_parses(len(tokens))",
"def process_file(path):\r\n\ttokenset = {}\r\n\r\n\tfp = open(path, 'r')\r\n\temailMsg = email.message_from_file(fp)\r\n\tfp.close()\r\n\r\n\ttokenset = parse_body(emailMsg.get_payload().lower())\r\n\r\n\treturn tokenset",
"def _parse(tokens: Deque[Token]) -> object:\n token = tokens.popleft()\n\n if token.type == 'left_bracket':\n return parse_array(tokens)\n\n if token.type == 'left_brace':\n return parse_object(tokens)\n\n if token.type == 'string':\n return parse_string(token)\n\n if token.type == 'number':\n return parse_number(token)\n\n special_tokens = {\n 'true': True,\n 'false': False,\n 'null': None,\n }\n if token.type in ('boolean', 'null'):\n return special_tokens[token.value]\n\n raise ParseError(\n f\"Unexpected token: {token.value} \"\n f\"(line {token.line} column {token.column})\")"
]
| [
"0.6242305",
"0.6075056",
"0.5916095",
"0.5787497",
"0.5784725",
"0.5641238",
"0.5510344",
"0.54594046",
"0.5448223",
"0.54254425",
"0.5420253",
"0.5386509",
"0.53751945",
"0.53578675",
"0.524982",
"0.5234695",
"0.5216267",
"0.5213507",
"0.5203124",
"0.5129996",
"0.5129147",
"0.51177645",
"0.50995696",
"0.50968355",
"0.5093742",
"0.5091778",
"0.5057566",
"0.50383043",
"0.5034179",
"0.5009051"
]
| 0.7977467 | 0 |
Parse and return list of all account transactions. | def _parse_account_transactions(self, body):
transactions = []
soup = BeautifulSoup(body, 'html.parser')
for row in soup.select('.history.data-list-wrapper-inner tr'):
transaction = {
'date': row.select('td')[1].text,
'type': row.select('td')[2].select('span')[0].text,
'text': row.select('td')[2].select('div')[0].text,
'amount': self._fix_balance(row.select('td')[3].text)
}
transactions.append(transaction)
return transactions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]",
"def transactions(self):\r\n return tx.AccountTransactions(self)",
"def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]",
"def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)",
"def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]",
"def transactions(self):\n return self._call_account_method(\n 'transactions'\n )",
"def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list",
"def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')",
"def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions",
"def get_transactions(self, account_id, from_date=None, to_date=None,\n page_size=None, type_list=None):\n endpoint = 'accounts/{0}/transactions'.format(account_id)\n\n params = {}\n\n if from_date:\n params[\"from\"] = from_date\n\n if to_date:\n params[\"to\"] = to_date\n\n if page_size:\n params[\"pageSize\"] = page_size\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)",
"def get_transactions_for_ynab_account(self, account_name):\n account = self.get_account_by_name(account_name)\n if not account:\n return []\n return [YnabServerTransaction(transaction, transaction.account)\n for transaction in account.transactions]",
"def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)",
"def transactions(self, billing_period=0, \n transaction_type='recent'):\n result = defaultdict(list)\n billing_periods = pyamex.utils.to_list(billing_period)\n\n for period in billing_periods:\n options = { 'PayLoadText' : self.client.transactions_request_xml(\n card_index=0, \n billing_period=period, \n transaction_type=transaction_type)}\n\n response = requests.get(self.client.url, options) \\\n .content\n\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n for transaction in xml_tree.findall('StatementDetails/CardAccounts/CardAccount/TransactionDetails/Transaction'):\n result[period].append(Transaction(transaction))\n\n return result",
"def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]",
"def _parse_transactions_file(self, path_to_transactions_file: str) -> List[Transaction]:\n ticker_params_to_ticker = {\n (ticker.name, ticker.security_type, ticker.point_value): ticker for ticker in self.tickers\n }\n\n def get_matching_ticker(row: QFSeries) -> Ticker:\n \"\"\" Returns the matching specific ticker. In case if the ticker does not belong to the list of tickers\n passed as the parameter, the transaction is excluded. \"\"\"\n ticker_str = row.loc[\"Contract symbol\"]\n name = row.loc[\"Asset Name\"]\n sec_type = SecurityType(row.loc[\"Security type\"])\n point_value = row.loc[\"Contract size\"]\n ticker = ticker_params_to_ticker.get((name, sec_type, point_value), None)\n if isinstance(ticker, FutureTicker):\n ticker_type = ticker.supported_ticker_type()\n ticker = ticker_type(ticker_str, sec_type, point_value)\n return ticker\n\n transactions_df = pd.read_csv(path_to_transactions_file)\n transactions = [Transaction(pd.to_datetime(row.loc[\"Timestamp\"]),\n get_matching_ticker(row),\n row.loc[\"Quantity\"],\n row.loc[\"Price\"],\n row.loc[\"Commission\"]) for _, row in transactions_df.iterrows()]\n transactions = [t for t in transactions if t.ticker is not None]\n return transactions",
"def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions",
"def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError",
"def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions",
"def get_transaction_list(self, account_id, from_date, to_date,\n type_list=None):\n endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)\n\n params = {}\n\n params[\"from\"] = from_date\n params[\"to\"] = to_date\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)",
"def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts",
"def get_account_transactions(self, StartTime, EndTime):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('ListAccountPostings', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_postings(data.get('data', {})) if data.get('data') else {}",
"def get_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...",
"def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})",
"def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions",
"def test_wallets_get_transaction_list(self):\n pass",
"def test_06_get_all_portfolio_transactions(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n t = Transaction.get_transactions(p)\n self.assertTrue(isinstance(t, list),\n msg=\"Transaction is NOT returning a list of all transaction instances\")\n print(\"Transaction get transactions is returning the following list: {}\".format(\n t,\n ))",
"def fetch_transactions(self, address, startblock=None, endblock=None, simplify=True, verbose=False):\n all_transactions = []\n while True:\n transactions = self.fetch_transactions_in_range(address, startblock, endblock)\n try:\n if simplify:\n transactions = list(map(simplify_tx, transactions))\n except TypeError:\n print('error', address, 'start block', startblock, 'end block', endblock, 'transactions', transactions)\n all_transactions.extend(transactions)\n if verbose:\n print('fetching block', startblock, 'total transactions', len(all_transactions))\n if len(transactions) < 1000:\n break\n # do not incremement the block, in case there are multiple transactions in one block\n # but spread across paginated results. we dedupe later.\n startblock = int(transactions[-1]['blockNumber'])\n return all_transactions",
"def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions",
"def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)",
"def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200"
]
| [
"0.7151018",
"0.7149907",
"0.71008074",
"0.6977178",
"0.68028396",
"0.67775965",
"0.6729908",
"0.66277045",
"0.6610449",
"0.6469714",
"0.64354056",
"0.6426669",
"0.6411786",
"0.6402797",
"0.6374859",
"0.6352835",
"0.631962",
"0.63142496",
"0.6298551",
"0.6292313",
"0.62710595",
"0.62448984",
"0.6243755",
"0.6242648",
"0.61536014",
"0.6128081",
"0.60969037",
"0.6096427",
"0.60906976",
"0.6075746"
]
| 0.7749547 | 0 |
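The record above scrapes transaction rows out of server-rendered HTML with BeautifulSoup. A minimal, self-contained sketch of that row-parsing pattern follows; the sample HTML and the fix_balance helper are invented for illustration and are not part of the original class.

from bs4 import BeautifulSoup

SAMPLE_HTML = """
<div class="history data-list-wrapper-inner">
  <table>
    <tr><td>1</td><td>2024-01-02</td><td><span>Card</span><div>Grocery store</div></td><td>-123,45</td></tr>
    <tr><td>2</td><td>2024-01-03</td><td><span>Transfer</span><div>Salary</div></td><td>10 000,00</td></tr>
  </table>
</div>
"""

def fix_balance(raw):
    # Hypothetical helper: normalise "10 000,00"-style amounts to a float.
    return float(raw.replace(" ", "").replace("\xa0", "").replace(",", "."))

def parse_transactions(body):
    transactions = []
    soup = BeautifulSoup(body, "html.parser")
    for row in soup.select(".history.data-list-wrapper-inner tr"):
        cells = row.select("td")
        transactions.append({
            "date": cells[1].text,
            "type": cells[2].select("span")[0].text,
            "text": cells[2].select("div")[0].text,
            "amount": fix_balance(cells[3].text),
        })
    return transactions

if __name__ == "__main__":
    for tx in parse_transactions(SAMPLE_HTML):
        print(tx)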
Fetch and return account transactions for account_number. | def get_account_transactions(self, account_number):
logger.debug('Fetching account transactions for account %s',
account_number)
# Get javax.faces.ViewState from the last request
last_req_hidden_inputs = self._hidden_inputs_as_dict(
BeautifulSoup(self.last_req_body, 'html.parser'))
data = {
'dialog-overview_showAccount': 'Submit',
'menuLinks_SUBMIT': 1,
'menuLinks:_idcl': '',
'menuLinks:_link_hidden_': '',
'javax.faces.ViewState': last_req_hidden_inputs.get(
'javax.faces.ViewState'),
'_token': self.token,
'productId': account_number
}
path = '/im/im/csw.jsf'
req = self.session.post(self.BASE_URL + path, data=data)
self.last_req_body = req.content
logger.debug('Transaction request response code %s', req.status_code)
self._parse_tokens(req.text)
# Parse transactions
transactions = self._parse_account_transactions(req.text)
        # Request was ok but no transactions were found. Try to refetch.
        # Requests seems to lose the connection sometimes with the message
# "Resetting dropped connection". This should work around that
# problem.
if req.status_code == requests.codes.ok and not transactions:
transactions = self.get_account_transactions(account_number)
return transactions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def balance(self, account_number: int): \n return self._accounts[account_number][1]",
"def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')",
"def get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)",
"def listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]",
"def get_account(self, account_number):\n\n if not isinstance(account_number, str):\n raise ValueError('Invalid type <{}> for account number'.format(\n type(account_number)))\n\n try:\n if self.di is not None:\n result = self.di.get(account_number)\n else:\n result = self.accounts.get(account_number, None)\n\n except DBConnectionError:\n result = \"Connection error occurred. Try Again.\"\n return result",
"def address_transactions(self, address):\n res = r.get(self.url + self.address_tx + str(address))\n return self.execute(res)",
"def get_transactions(self, account_id, from_date=None, to_date=None,\n page_size=None, type_list=None):\n endpoint = 'accounts/{0}/transactions'.format(account_id)\n\n params = {}\n\n if from_date:\n params[\"from\"] = from_date\n\n if to_date:\n params[\"to\"] = to_date\n\n if page_size:\n params[\"pageSize\"] = page_size\n\n if type_list:\n type_list = \"%2C\".join(type_list)\n params[\"type\"] = type_list\n\n return self._api.request(endpoint, params=params)",
"def transactions(self):\r\n return tx.AccountTransactions(self)",
"def _get_accounts_data(self, accounts, display_account,tables,where_clause,where_params):\n\n account_result = {}\n # Prepare sql query base on selected parameters from wizard\n tables, where_clause, where_params = tables,where_clause,where_params\n\n # print tables, where_clause, where_params\n # print \"tables data\",tables\n # print \"Table Type\",type(tables)\n # print \"where clause data\",where_clause\n # print \"where clause\",type(where_clause)\n # print \"where params data\",where_params\n # print \"where params\",type(where_params)\n\n tables = tables.replace('\"','')\n if not tables:\n tables = 'account_move_line'\n wheres = [\"\"]\n if where_clause.strip():\n wheres.append(where_clause.strip())\n filters = \" AND \".join(wheres)\n # compute the balance, debit and credit for the provided accounts\n request = (\"SELECT account_id AS id, SUM(debit) AS debit, SUM(credit) AS credit, (SUM(debit) - SUM(credit)) AS balance\" +\\\n \" FROM \" + tables + \" WHERE account_id IN %s \" + filters + \" GROUP BY account_id\")\n params = (tuple(accounts.ids),) + tuple(where_params)\n self.env.cr.execute(request, params)\n for row in self.env.cr.dictfetchall():\n account_result[row.pop('id')] = row\n account_res = []\n for account in accounts:\n res = dict((fn, 0.0) for fn in ['credit', 'debit', 'balance'])\n currency = account.currency_id and account.currency_id or account.company_id.currency_id\n res['code'] = account.code\n res['name'] = account.name\n if account.id in account_result.keys():\n res['debit'] = account_result[account.id].get('debit')\n res['credit'] = account_result[account.id].get('credit')\n res['balance'] = account_result[account.id].get('balance')\n if display_account == 'all':\n account_res.append(res)\n if display_account == 'not_zero' and not currency.is_zero(res['balance']):\n account_res.append(res)\n if display_account == 'movement' and (not currency.is_zero(res['debit']) or not currency.is_zero(res['credit'])):\n account_res.append(res)\n print \"data from core report model\",account_res\n return account_res",
"def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})",
"def transactions(self):\n return self._call_account_method(\n 'transactions'\n )",
"def get_account_balance(self, account_number):\n\n if not isinstance(account_number, str):\n raise ValueError('Invalid type <{}> for account number'.format(\n type(account_number)))\n\n try:\n result = self.di.get(account_number) if self.di is not None \\\n else self.accounts.get(account_number, None)\n if result is not None:\n result = result[\"balance\"]\n\n except DBConnectionError:\n result = \"Connection error occurred. Try Again.\"\n return result",
"def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts",
"def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]",
"def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass",
"def list_account_transactions(self,\r\n year,\r\n month=None,\r\n get_as_csv=None):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(year=year)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/admin/invoice'\r\n _query_parameters = {\r\n 'year': year,\r\n 'month': month,\r\n 'getAsCsv': get_as_csv\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 400:\r\n raise APIException('Bad request', _context)\r\n elif _context.response.status_code == 403:\r\n raise APIException('Forbidden (Access denied)', _context)\r\n elif _context.response.status_code == 500:\r\n raise APIException('Internal server error', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Transaction.from_dictionary)",
"def get_transactions_for_ynab_account(self, account_name):\n account = self.get_account_by_name(account_name)\n if not account:\n return []\n return [YnabServerTransaction(transaction, transaction.account)\n for transaction in account.transactions]",
"def get_transactions(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting historical transactions. \"\n \"Or rather it has no defined 'get_transactions' method.\"\n )",
"def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)",
"def get_account(self, account):\n \n pass",
"def get_acc_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)",
"def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)",
"def transactions(self, billing_period=0, \n transaction_type='recent'):\n result = defaultdict(list)\n billing_periods = pyamex.utils.to_list(billing_period)\n\n for period in billing_periods:\n options = { 'PayLoadText' : self.client.transactions_request_xml(\n card_index=0, \n billing_period=period, \n transaction_type=transaction_type)}\n\n response = requests.get(self.client.url, options) \\\n .content\n\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n for transaction in xml_tree.findall('StatementDetails/CardAccounts/CardAccount/TransactionDetails/Transaction'):\n result[period].append(Transaction(transaction))\n\n return result",
"def account(self, account_code):\r\n return acc.Account(self, account_code)",
"def get_transaction(self, excludes_list):\n response = client.get(self.url, \"transactions\", {\"exclude_hash\": excludes_list})\n if response.status == 200:\n print(\"Transaction successfully received\")\n return Transaction.parse(response.data)\n elif response.status == 404:\n # print(\"no request to be received\")\n return None\n else:\n print(\"Unknown error while requesting transaction\")\n return None",
"def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)",
"def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)",
"def account_history(self, account=None, type='all', range=\"all\"):\n \n if not (utils.check(type) and utils.check(range)):\n return {}\n \n # Imply account\n if account == None:\n account = self.params['account']\n \n # Assemble URL\n url = self.endpoints['base'] +\\\n 'accounts/' +\\\n str(account) +\\\n '/history.json'\n # Add parameters\n data = {\n 'range':range,\n 'transactions':type\n }\n \n # Create HTTP Request objects\n session = requests.Session()\n auth = self.create_auth()\n req = requests.Request('GET',url,params=data,auth=auth).prepare()\n \n \n results = {'response':session.send(req).json()}\n results['request'] = utils.pretty_print_POST(req)\n \n return results['response']['response']['transactions']['transaction']",
"def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200"
]
| [
"0.70014375",
"0.676105",
"0.6646065",
"0.66168296",
"0.65606546",
"0.6340439",
"0.6324217",
"0.6267469",
"0.62176937",
"0.6165127",
"0.61637956",
"0.61499494",
"0.61320525",
"0.6065751",
"0.6035426",
"0.6016021",
"0.59824365",
"0.5944884",
"0.59340096",
"0.5916211",
"0.5883035",
"0.58808583",
"0.5850363",
"0.58464366",
"0.58334553",
"0.57833105",
"0.57794726",
"0.57750857",
"0.5750912",
"0.5749473"
]
| 0.81577533 | 0 |
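The record above refetches recursively whenever a 200 response arrives without any parsed transactions, which can recurse indefinitely if the response stays empty. A hedged sketch of a bounded-retry alternative; fetch is a stand-in callable (e.g. a method like get_account_transactions), not part of the original API.

import time

def fetch_with_retry(fetch, account_number, attempts=3, delay=1.0):
    # Call fetch(account_number) until it returns a non-empty list
    # or the attempt budget is exhausted.
    transactions = []
    for attempt in range(1, attempts + 1):
        transactions = fetch(account_number)
        if transactions:
            break
        time.sleep(delay * attempt)  # simple linear back-off between retries
    return transactions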
Get active users in a given Discord text channel. | def get_active_users(text_channel) -> List[discord.Member]:
active_users = []
for m in text_channel.members:
if m.status.name in ["online", "dnd"] and m.bot == False:
active_users.append(m)
return active_users | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_channel_users(channel):\n try:\n response = client.conversations_members(channel=channel)\n except SlackApiError as err:\n assert err.response[\"error\"]\n user_ids = response[\"members\"]\n return user_ids",
"def get_channel_users(self, channel):\n data = {\n 'token' : app.config['SLACK_BEARER'],\n 'channel' : channel\n }\n\n group = self.__form_call('groups.info', data)\n group_dict = group.json()\n if not group_dict['ok']:\n channel = self.get_group_info(data)\n channel_dict = channel.json()\n return channel_dict['channel']['members']\n return group_dict['group']['members']",
"def get_users_for(self, server, channame):\n skey = server.lower()\n ckey = irc.strings.lower(channame)\n users = []\n if skey in self.serverchans and ckey in self.serverchans[skey]:\n users = self.serverchans[skey][ckey].users.keys()\n return users",
"def list_cheque(channel, user_id):\n return dba.get_user_valid_cheques(user_id)",
"def get_users(msg: telebot.types.Message):\n users = User.select()\n m = ''\n for user in users:\n menu_caption = \"In PVP game\" if user.state == states.USER_IN_PVP_GAME else \"In AI game\" if user.state == states.USER_IN_AI_GAME else \"In menu\"\n m += f'[{user.first_name}](tg://user?id={user.user_id}) - {menu_caption}\\n'\n\n bot.send_message(\n msg.from_user.id,\n m,\n parse_mode='Markdown'\n )",
"def get_slack_users(users=[]):\n api_call = slack_client.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n return users",
"def get_users_list(self, session):\n\n users = session.query(User.chat_id).filter(User.is_admin==False).all()\n return users",
"def _users_list(self):\n result = self.slack.api_call(\"users.list\", presence=0)\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['members']",
"async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")",
"def joined(self, channel):\n # Return user list to Server bot.\n self.get_nicklist()",
"def get_user_list():\n users_tuple = db_session.query(Chat.chatID).all()\n users_list = [user for user, in users_tuple]\n return users_list",
"def users(bot, event, *args):\n yield from command.run(bot, event, *[\"convusers\", \"id:\" + event.conv_id])",
"def get_current_users(self):\n active_sessions = Session.objects.filter(expire_date__gte=timezone.now())\n user_id_list = []\n for session in active_sessions:\n data = session.get_decoded()\n user_id_list.append(data.get('_auth_user_id', None))\n # Query all logged in users based on id list\n return self.filter(id__in=user_id_list)",
"def active_users(self, *args, **kwargs):\r\n return self._get('ActiveUsers', *args, **kwargs)",
"async def _list(self, ctx):\n config = await self.config.guild(ctx.guild).channels()\n data = [self.bot.get_channel(x).mention for x in config]\n if ctx.channel.id in config:\n destination = ctx.author\n else:\n destination = ctx\n if not data:\n return await destination.send(\"There are no channels.\")\n await destination.send(\", \".join(data))",
"def get_all_users():",
"def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))",
"def get_user_channels(self):\n\n request = self.youtube.subscriptions().list(\n part='snippet',\n mine=True,\n order='alphabetical'\n )\n subscriptions = []\n while request:\n response = request.execute()\n subscriptions.append(response)\n request = self.youtube.subscriptions().list_next(request, response)\n\n channels = {}\n for subscription in subscriptions:\n for channel in subscription['items']:\n channel_title = channel['snippet']['title']\n channel_id = channel['snippet']['resourceId']['channelId']\n channels[channel_title] = channel_id\n\n return channels",
"def users(message):\n user_list = []\n for userid, user in iteritems(message._client.users):\n user_list.append(user[\"name\"])\n message.reply(Strings['USERS_FOUND'].format(len(user_list)))",
"def get_channels():\n r = slack.channels.list().body\n return [ c for c in r['channels'] if c['is_member'] ]",
"def get_users():\n users = functions.users()\n return users",
"def get_users_list_full(self, session):\n\n users = session.query(\n User.chat_id,\n User.is_banned,\n User.username,\n User.first_name,\n User.last_name,\n User.time_registered\n ).filter(User.is_admin==False).all()\n return users",
"def get_user_channels(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/channels'\n options = {}\n return self.make_request(uri, options)",
"def get_users():\n return db.fetch_users()",
"def lookup_users(self):\n return self.slack_users",
"def get_users_admins_list(self, session):\n\n users = session.query(User.chat_id).all()\n return users",
"def get_users(self, email):\n print(\"bu\")\n active_users = UserModel._default_manager.filter(**{\n '%s__iexact' % UserModel.get_username_field_name(): username,\n 'is_active': True,\n })\n print(active_users)\n # active_users = UserModel._default_manager.filter(**{\n # '%s__iexact' % UserModel.get_email_field_name(): email,\n # 'is_active': True,\n # })\n return (u for u in active_users if u.has_usable_password())",
"def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])",
"def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()",
"def get_users(self, email):\n active_users = User.objects.filter(\n email__iexact=email,\n is_active=True\n )\n return (u for u in active_users)"
]
| [
"0.6941497",
"0.68513894",
"0.6534244",
"0.6085032",
"0.606758",
"0.60283536",
"0.59610504",
"0.58857435",
"0.58695865",
"0.5821038",
"0.582083",
"0.5801356",
"0.5788422",
"0.5784686",
"0.5753404",
"0.57440823",
"0.57334614",
"0.5729438",
"0.5721531",
"0.5691711",
"0.5660485",
"0.56561285",
"0.56363684",
"0.56235284",
"0.5614347",
"0.56097746",
"0.5600188",
"0.559924",
"0.5577468",
"0.55469203"
]
| 0.83590263 | 0 |
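A usage sketch for the member-filtering helper in the record above, wired into a discord.py 2.x command. The prefix, command name, and token are placeholders, and the bot is assumed to have the members and presences privileged intents enabled.

import discord
from discord.ext import commands

intents = discord.Intents.default()
intents.members = True
intents.presences = True

bot = commands.Bot(command_prefix="!", intents=intents)

def get_active_users(text_channel):
    # Same filter as the dataset record: online/dnd, humans only.
    return [m for m in text_channel.members
            if m.status.name in ("online", "dnd") and not m.bot]

@bot.command(name="active")
async def active(ctx):
    users = get_active_users(ctx.channel)
    await ctx.send(f"{len(users)} active member(s) here.")

# bot.run("YOUR_TOKEN")  # placeholder token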
Returns 'combined_text' content directly within a taxon | def content_for_taxon(self, taxon):
content_ids_for_taxon = list(self.content_taxon_mapping[self.content_taxon_mapping['taxon_id'] == taxon.content_id]['content_id'])
return self.content[self.content['content_id'].isin(content_ids_for_taxon)]['combined_text'].to_list(); | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def combine_text(evt):\n global output\n output = output + evt.result.text\n print(evt.result.text)",
"def get_text(self):\n text_complet = \"\"\n rez_dict = self.__results\n for i in range(0, len(rez_dict[\"text\"])):\n text = rez_dict[\"text\"][i]\n conf = int(rez_dict[\"conf\"][i])\n if conf > self.__min_confidence:\n text_complet += text + \" \"\n return text_complet",
"def concatenate_processed_text(self):\n\n\n\t\tconcatenated_text = \"\"\n\t\tfor line in self.processed_text:\n\t\t\tconcatenated_text += \" \".join(line) + \" \"\n\n\n\t\t# Remove the trailing space character from the concatenated string\n\t\t# of words.\n\t\tconcatenated_text = concatenated_text[:-1]\n\n\t\tself.concatenated_text = concatenated_text",
"def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result",
"def allText(node):\n return \"\".join(allTextGenerator(node))",
"def get_text(self):",
"def combine(data):\n res = []\n l = len(data['ents'])\n text = data['text']\n\n def combineHelper(path, idx, count):\n if count == 2:\n e1 = path[0]['text']\n e1_label = path[0]['type']\n e2 = path[1]['text']\n e2_label = path[1]['type']\n # e11 = path[0]['start']\n # e12 = path[0]['end']\n # e21 = path[1]['start']\n # e22 = path[1]['end']\n # new_text = text[:e11] + '<e1>' + text[e11:e12] + '</e1> ' + text[e12:e21] + '<e2>' + text[e21:e22] +\\\n # '</e2>' + text[e22:]\n res.append(\n {\n \"text\": text,\n 'h': {'pos': (path[0]['start'], path[0]['end'])},\n 't': {'pos': (path[1]['start'], path[1]['end'])},\n \"e1\": e1,\n \"e2\": e2,\n \"e1_label\": e1_label,\n \"e2_label\": e2_label,\n \"e1_id\": path[0]['id'],\n \"e2_id\": path[1]['id']\n }\n )\n return\n else:\n for i in range(idx, l):\n path.append(data['ents'][i])\n combineHelper(path, i+1, count+1)\n path.pop()\n\n combineHelper([], 0, 0)\n return res",
"def textMerge(self, second):\n if second is None: second__o = None\n else: second__o = second._o\n ret = libxml2mod.xmlTextMerge(self._o, second__o)\n if ret is None:raise treeError('xmlTextMerge() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def getText(self):\n return(' '.join(map(lambda x:x.text,self.getNested())))",
"def _text_of(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string",
"def get_text():\n global x\n for i in soup.body(\"aside\", {\"id\": \"text-2\"}):\n x = i.get_text()",
"def get_text(node) -> str:\n result = ''\n if node.text:\n result = node.text\n for elem in node:\n result += GLGenerator.get_text(elem)\n if node.tail:\n result += node.tail\n return result",
"def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }",
"def text(self, just_text=False):\n lines = []\n for node, data in self.traverse():\n if just_text or data['has_text'] or data['pad']:\n lines += data['text']\n else:\n lines += [data['meta']] + data['title'] + data['text']\n return flatten(lines)",
"def fulltext(self):\n child_contents = ''\n for c in self.children:\n child_contents += c.fulltext\n return self.text + child_contents",
"def __getText(cls, node):\n\n pieces = []\n code = cls.MARKUP.get(node.tag)\n if code:\n pieces.append(f\"{{\\\\{code} \")\n if node.text is not None:\n pieces.append(fix(node.text))\n for child in node.findall(\"*\"):\n pieces.append(cls.__getText(child))\n if child.tail is not None:\n pieces.append(child.tail)\n if code:\n pieces.append(\"}\")\n return \"\".join(pieces)",
"def concat_text(text):\n textout = \" \".join(text)\n return textout",
"def _format_response(self, response):\n texts = []\n for result in response.results: \n texts.append(result.alternatives[0].transcript)\n return texts",
"def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string",
"def _compute_fulltext(self):\n return ''",
"def get_text(downgrade_titles=False):",
"def getTextWithHeaders():",
"def special_tags_to_text(self):\n if (self.windtag is None and self.tornadotag is None and\n self.hailtag is None and self.tornadodamagetag is None and\n self.waterspouttag is None and not self.flood_tags):\n return \"\"\n\n parts = []\n if self.tornadotag is not None:\n parts.append(\"tornado: %s\" % (\n self.tornadotag))\n if self.waterspouttag is not None:\n parts.append(\"waterspout: %s\" % (\n self.waterspouttag))\n if self.tornadodamagetag is not None:\n parts.append(\"tornado damage threat: %s\" % (\n self.tornadodamagetag))\n if self.windtag is not None:\n parts.append(\"wind: %s%s %s\" % (\n self.winddirtag.replace(\">\", \">\").replace(\"<\", \"<\"),\n self.windtag, self.windtagunits))\n if self.hailtag is not None:\n parts.append(\"hail: %s%s IN\" % (\n self.haildirtag.replace(\">\", \">\").replace(\"<\", \"<\"),\n self.hailtag))\n for k, v in self.flood_tags.items():\n parts.append(\"%s: %s\" % (k.lower(), v.lower()))\n return \" [\" + \", \".join(parts) + \"] \"",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def content_for_regtext(instruction_xml):\n label_parts, amdpar = label_amdpar_from(instruction_xml)\n xml = find_section(amdpar)\n\n def parse_regtext():\n sections = build_from_section(label_parts[0], xml)\n if sections:\n return sections[0]\n\n return xml, parse_regtext",
"def format_output(self):\n brat = Brat.convert_to_brat(self._input_filepath, 'results/brat.txt')\n output_file = self.__output_filepath.open(encoding='utf-8')\n # Assign BRAT span to each token from output\n terms = []\n i = 0\n multiword = False\n for token_tagged in output_file.readlines():\n token_tagged = token_tagged.split()\n if not token_tagged:\n break\n token = token_tagged[0]\n tag = token_tagged[1]\n term = {\n 'token': token,\n 'tag': tag\n }\n if token == brat[i]['token']:\n term['start'] = brat[i]['start']\n term['end'] = brat[i]['end']\n terms.append(term)\n i += 1\n elif ' ' in brat[i]['token'] and token in brat[i]['token']:\n multiword = True\n term['start'] = str(int(brat[i]['start']) + brat[i]['token'].index(token))\n term['end'] = str(int(term['start']) + len(token))\n terms.append(term)\n elif any([char in token for char in ['(', ')', ':', '/']]):\n multiword = True\n term['start'] = brat[i]['start']\n while brat[i]['token'] in token:\n i += 1\n term['end'] = brat[i]['end'],\n terms.append(term)\n elif multiword:\n i += 1\n if token != brat[i]['token']:\n raise Exception('Tokens does not match: {0} {1}'.format(token, brat[i]['token']))\n term['start'] = brat[i]['start']\n term['end'] = brat[i]['end']\n terms.append(term)\n multiword = False\n i += 1\n else:\n raise Exception('Tokens does not match: {0} {1}'.format(token, brat[i]['token']))\n # Generate key phrases from previous terms\n multiword_tags = [\n 'I-Grp_Enfermedad',\n 'B-Estructura_Corporal',\n 'I-Estructura_Corporal',\n 'B-Calificador',\n 'I-Calificador'\n ]\n for term in terms:\n if term['tag'] == 'O':\n continue\n if self._key_phrases != [] and int(self._key_phrases[-1]['span'][-1][1]) == (int(term['start']) - 1) \\\n and term['tag'] in multiword_tags:\n self._key_phrases[-1]['span'].append((term['start'], term['end']))\n self._key_phrases[-1]['term'] += ' ' + term['token']\n else:\n key_phrase = {\n 'span': [(term['start'], term['end'])],\n 'label': 'Concept',\n 'term': term['token'],\n }\n self._key_phrases.append(key_phrase)\n # Format span\n for key_phrase in self._key_phrases:\n span = map(lambda tup: '{0} {1}'.format(tup[0], tup[1]), key_phrase['span'])\n key_phrase['span'] = ';'.join(span)"
]
| [
"0.58846337",
"0.5353593",
"0.52949023",
"0.5253419",
"0.5204414",
"0.5158431",
"0.5110012",
"0.509821",
"0.508579",
"0.5081426",
"0.50729656",
"0.5067092",
"0.5052136",
"0.50272155",
"0.50136214",
"0.5013542",
"0.50008446",
"0.4993583",
"0.49686253",
"0.49405417",
"0.49388504",
"0.49301016",
"0.49186555",
"0.4907214",
"0.4907214",
"0.4907214",
"0.4907214",
"0.4907214",
"0.48709947",
"0.48609835"
]
| 0.682943 | 0 |
Returns all content rows directly within a taxon | def content_rows_for_taxon(self, taxon):
content_ids_for_taxon = list(self.content_taxon_mapping[self.content_taxon_mapping['taxon_id'] == taxon.content_id]['content_id'])
return self.content[self.content['content_id'].isin(content_ids_for_taxon)]; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def content_for_taxon(self, taxon):\n content_ids_for_taxon = list(self.content_taxon_mapping[self.content_taxon_mapping['taxon_id'] == taxon.content_id]['content_id'])\n return self.content[self.content['content_id'].isin(content_ids_for_taxon)]['combined_text'].to_list();",
"def fetch_by_id(self, taxon):\n res = self.ensembl.get_taxonomy_by_id(taxon)\n return res",
"def _get_rows(self) -> List[htmler.Tr]:\n r = []\n\n for i in range(len(self.value)):\n row_widgets = {w.uid: w for w in self._get_widgets()} # type: Dict[str, Abstract]\n for w_name, w_value in self.value[i].items():\n row_widgets[w_name].value = w_value\n\n r.append(self._get_row(list(row_widgets.values()), i))\n\n return r",
"def test_make_core_taxa(self):\n basic_test_runner(self, 'core_taxa', nrows=-1)",
"def get_rows(self) -> List[List[str]]:\n return self.content",
"def get_gremium_data(self, response):\n\n # e.g. <td class=\"smc_td smc_field_silink\"><a href=\"to0040.asp?__ksinr=11487\" title=\"Details anzeigen: Bezirksvertretung Bochum-Mitte 16.05.2019 \" class=\"smc_doc smc_datatype_si\">16.05.2019</a><!--SMCINFO:si.bi.1.4.1.1.16.1.3 --> 15:00-18:09</td>\n urls = response.xpath('//tr[contains(@class, \"smcrow1\") or contains(@class, \"smcrow2\") or contains(@class, \"smcrown\")]/*/a/@href').getall()\n dates = response.xpath('//tr[contains(@class, \"smcrow1\") or contains(@class, \"smcrow2\") or contains(@class, \"smcrown\")]/*/a/text()').getall()\n\n # e.g. <a href=\"getfile.asp?id=426409&type=do&\" title=\"Einladung \" target=\"_blank\">Einladung <span class=\"smcwrapsmall smcdosize\" title=\"Dateigröße\">266\\xa0KB </span></a>\n einladungen = response.xpath('//a[contains(text(), \"Einladung\")]/@href').getall()\n # e.g. <a href=\"getfile.asp?id=427859&type=do&\" title=\"Niederschrift öffentlich \" target=\"_blank\">Niederschrift öffentlich <span class=\"smcwrapsmall smcdosize\" title=\"Dateigröße\">570\\xa0KB </span></a>\n niederschriften = response.xpath('//a[contains(text(), \"Niederschrift\")]/@href').getall()\n\n # table layout in one table row; has either no, just one, or both Einladung and Niederschrift\n tables = response.xpath('//table[contains(@class, \"smcdocbox smcdocboxright\")]').getall()\n\n # not all einladungen have niederschriften vv. insert None accordingly\n for i in range(len(tables)):\n if \"Niederschrift\" not in tables[i]:\n niederschriften.insert(i, None)\n if \"Einladung\" not in tables[i]:\n einladungen.insert(i, None)\n\n return urls, dates, niederschriften, einladungen",
"def createTNTTempTable(self):\n\t\t\n\t\tquery = \"\"\"\n\t\tSELECT \n\t\tIDENTITY (INT) as rownumber,\n\t\ta.NameID, a.Term, a.Value, a.Reference\n\t\tINTO [{0}]\n\t\tFROM (\n\t\tSELECT a.AnalysisID, l.NameID, SUBSTRING(ac.DisplayText,18,255) AS Term, a.AnalysisValue AS Value, lr.TaxonNameListRefText AS Reference\n\t\tFROM DiversityTaxonNames_{1}.dbo.TaxonNameList l\n\t\t\tINNER JOIN DiversityTaxonNames_{1}.dbo.TaxonNameListReference lr ON lr.NameID=l.NameID AND lr.ProjectID=l.ProjectID\n\t\t\tLEFT JOIN DiversityTaxonNames_{1}.dbo.TaxonNameListAnalysis a ON a.NameID=lr.NameID AND a.ProjectID=lr.ProjectID\n\t\t\tLEFT JOIN DiversityTaxonNames_{1}.dbo.TaxonNameListAnalysisCategory ac ON ac.AnalysisID=a.AnalysisID\n\t\tWHERE\n\t\t\tac.DisplayText IN (\n\t\t\t -- Vertebrata\n\t\t\t'RoteListe_D_2009_LangfristigerBestandstrend',\n\t\t\t'RoteListe_D_2009_Verantwortlichkeit',\n\t\t\t'RoteListe_D_2009_AktuelleBestandssituation',\n\t\t\t'RoteListe_D_2009_RL-Kategorie',\n\t\t\t'RoteListe_D_2009_KurzfristigerBestandstrend',\n\t\t\t'RoteListe_D_2009_Sonderfälle',\n\t\t\t'RoteListe_D_2009_Neobiota',\n\t\t\t'RoteListe_D_2009_LetzterNachweis',\n\t\t\t'RoteListe_D_2009_Risikofaktoren',\n\t\t\t -- Insecta and Animalia\n\t\t\t'RoteListe_D_2016_RL-Kategorie',\n\t\t\t'RoteListe_D_2016_AktuelleBestandssituation',\n\t\t\t'RoteListe_D_2016_LangfristigerBestandstrend',\n\t\t\t'RoteListe_D_2016_KurzfristigerBestandstrend',\n\t\t\t'RoteListe_D_2016_Risikofaktoren',\n\t\t\t'RoteListe_D_2016_Verantwortlichkeit',\n\t\t\t'RoteListe_D_2016_Sonderfälle',\n\t\t\t'RoteListe_D_2016_Neobiota',\n\t\t\t'RoteListe_D_2016_LetzterNachweis',\n\t\t\t'RoteListe_D_2011_AktuelleBestandssituation',\n\t\t\t'RoteListe_D_2011_KurzfristigerBestandstrend',\n\t\t\t'RoteListe_D_2011_LangfristigerBestandstrend',\n\t\t\t'RoteListe_D_2011_Risikofaktoren',\n\t\t\t'RoteListe_D_2011_RL-Kategorie',\n\t\t\t'RoteListe_D_2011_Verantwortlichkeit',\n\t\t\t'RoteListe_D_2011_Neobiota',\n\t\t\t'RoteListe_D_2011_LetzterNachweis'\n\t\t\t) {2}\n\t\t) AS a\n\t\t;\"\"\".format(self.temptable, self.tnt_dbname, self.projectclause)\n\t\t\n\t\t\n\t\tself.cur.execute(query)\n\t\tself.con.commit()",
"def get_price_including_tax(article):\n price_with_tax = article.select(\"tr\")\n return price_with_tax[3].td.text",
"def test_client_tax_information_list(self):\n pass",
"def test_client_tax_information_retrieve(self):\n pass",
"def get_html_content():\n url = \"https://www.worldometers.info/coronavirus/\"\n req_data = requests.get(url).text\n soup = BeautifulSoup(req_data, 'html.parser')\n html_data = soup.select(\"#main_table_countries_today > tbody:nth-child(2) > tr[style='']\")\n return html_data",
"def taxa_data_frame(self):\n cols = list(self._taxa.keys())\n cols.remove(\"uid\")\n cols.remove(\"object\")\n df = DataFrame(self._taxa, columns=cols, index=self._taxa[\"uid\"])\n df.index.name = \"uid\"\n\n return df",
"def _get_rows(self):\n rows = []\n for row in self.plate_meta['rows']:\n rows.append(row['name'])\n self.rows = rows",
"def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text",
"def get_lift_rows(self):\n lift_rows = []\n\n for element in self.big_table[3].find_all('tr'):\n td_s = element.find_all('td')\n row = [i.text for i in td_s]\n lift_rows.append(row)\n\n return lift_rows",
"def _get_rows(self) -> List[htmler.Tr]:\n r = []\n\n widgets_per_row = len(self._get_widgets())\n for row_num in range(0, len(self.value), widgets_per_row):\n row_widgets = self._get_widgets()\n for col_num in range(len(row_widgets)):\n row_widgets[col_num].value = self.value[row_num + col_num]\n r.append(self._get_row(row_widgets, row_num))\n\n return r",
"def get_taxa(taxa_fname, sample_ids_kept=None):\r\n # future: pass in open file object instead\r\n taxa_f = open(taxa_fname, 'U')\r\n\r\n sample_ids, otu_ids, otu_table, lineages =\\\r\n parse_otu_table(taxa_f, count_map_f=float, remove_empty_rows=True)\r\n if sample_ids_kept:\r\n sam_idxs = [sample_ids.index(sam) for sam in sample_ids_kept]\r\n otu_table = otu_table[:, sam_idxs]\r\n return otu_ids, otu_table",
"def yield_results(self, selector, institution, line_titles):\n\n for line in selector.xpath('//tr'):\n if len(line.xpath('td')) > 0:\n results = dict(zip(line_titles, line.xpath('td//text()').getall()))\n results.update({\n \"Organismo\": institution,\n \"Regimen\": \"Contrata\"\n })\n yield results",
"def get_all_content(self):\n return self._get_all_content()",
"def tax_lines(self):\n raise NotImplemented",
"def test_taxonomy(n=5):\n ecoli_file = join(this_dir, \"e_coli_core.xml.gz\")\n ids = [\"Escherichia_coli_{}\".format(i) for i in range(1, n + 1)]\n taxa = pd.DataFrame({\"id\": ids})\n taxa[\"genus\"] = \"Escherichia\"\n taxa[\"species\"] = \"Eschericia coli\"\n taxa[\"reactions\"] = 95\n taxa[\"metabolites\"] = 72\n taxa[\"file\"] = ecoli_file\n return taxa",
"def gen_raw(self, show_headers=True, show_tags=True):\n if show_headers:\n yield self.headers\n if show_tags:\n yield self.display_tags\n for row in self:\n yield row.values",
"def extract_data(self, root, path, tag):\n data = []\n element = root.xpath(path)\n if element:\n url = self.PODEROPEDIA_BASE_URL + element[0].get('data-w2p_remote', None)\n if url:\n self.logger.debug('Querying {} from {}'.format(tag, url))\n try:\n response = self.session.get(url)\n response.raise_for_status()\n content = response.content\n html_tree = etree.HTML(content, parser=self.parser)\n if html_tree is None:\n return data\n rows = html_tree.xpath('.//*[starts-with(@id, \"collapse\")]/div/table/tr')\n for row in rows:\n target = target_name = target_path = relationship = None\n when = where = where_name = where_path = source = None\n row_id = row.get('id', '')\n cells = row.getchildren()\n idx = 0\n while idx < len(cells) - 1:\n try:\n cell_text = text_strip(cells[idx])\n except AttributeError:\n cell_text = ''\n sources = cells[idx].xpath('.//*[@class=\"fuente\"]')\n if len(sources) > 0:\n source = process_sources(cells[idx])\n elif cell_text == 'es' or cell_text == 'fue':\n when = cell_text\n idx = idx - 1\n target = cells[idx].find('a')\n if target is not None:\n target_path = target.get('href', None)\n target_name = text_strip(target)\n idx = idx + 2\n relationship = text_strip(cells[idx])\n elif cell_text == 'a' or cell_text == 'de':\n idx = idx - 1\n relationship = text_strip(cells[idx])\n idx = idx + 2\n target = cells[idx].find('a')\n if target is not None:\n target_path = target.get('href', None)\n target_name = text_strip(target)\n elif cell_text.startswith('desde'):\n when = cell_text\n elif 'es pasado' in cell_text:\n when = cell_text\n else:\n try:\n ignore = int(cell_text)\n when = cell_text\n except ValueError:\n potential_date = cell_text.split(' ')[0]\n try:\n ignore = datetime.strptime(potential_date, '%d-%m-%Y')\n when = cell_text\n except ValueError:\n try:\n ignore = datetime.strptime(potential_date, '%m-%Y')\n when = cell_text\n except ValueError:\n pass\n idx = idx + 1\n entry = {\n 'type': tag,\n 'target_path': target_path,\n 'relationship': relationship,\n 'when': when,\n 'where': where,\n 'source': source\n }\n data.append(entry)\n self.logger.debug('{}: {}'.format(tag, entry))\n except (requests.exceptions.HTTPError, etree.ParserError):\n self.logger.info('Something bad happened', exc_info=True)\n return data",
"def intrinsicPanel(self,with_this_list=''):\n if with_this_list == '':\n taxonomic_list = self.taxonomies\n else:\n taxonomic_list = with_this_list\n import pandas as pn\n all_entries = {}\n for i,tax_name_i in enumerate(settings.TAXONOMIC_TREE_KEYS):\n li = {}\n for j,tax_name_j in enumerate(settings.TAXONOMIC_TREE_KEYS): \n i_j = map(lambda t : t.intrinsicM[i,j],taxonomic_list)\n li[tax_name_j] = pn.Series(i_j)\n all_entries[tax_name_i] = li\n return pn.Panel(all_entries)",
"def get_table_row_values(self):\n tag_items = self.soup.find_all(\"tr\")\n table_rows = []\n for tag_item in tag_items:\n tag_child_item_values = tag_item.find_all(\"td\")\n tag_item_child_values = []\n for tag_child_item_value in tag_child_item_values:\n tag_item_child_values.append(tag_child_item_value.text.strip())\n table_rows.append(tag_item_child_values)\n return table_rows",
"def get_all_teas(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(0)",
"def rows_body(self) -> List[List]:\n return self.rows[1:]",
"def do_extract(self, xpath):\n s = Selector(self.driver.page_source)\n for i, result in enumerate(s.xpath(xpath).getall(), 1):\n print(i, result)",
"def get_flights_rows(tree, flight_table):\n return tree.xpath('.//*[@class=\"{} block\"]//tr[attribute::role]'.format(flight_table))",
"def body(self, response):\t\n\t\tx = response.xpath(\"//div[@class='story-content row-fluid']/p/text()\").extract()\n\n\t\tfor i in range(0,len(x)):\n\t\t\tx[i] = x[i].strip(\"\\r\\n\\t\")\n\t\treturn x"
]
| [
"0.64229196",
"0.56819576",
"0.5645641",
"0.5586926",
"0.5515208",
"0.54630274",
"0.53902406",
"0.5377141",
"0.5305812",
"0.5300794",
"0.52996236",
"0.5297317",
"0.5270316",
"0.5267102",
"0.52174985",
"0.51659465",
"0.5137481",
"0.5134758",
"0.51278883",
"0.51209366",
"0.5120395",
"0.50633794",
"0.50563",
"0.5056258",
"0.5045889",
"0.5022707",
"0.5007532",
"0.5004726",
"0.50028753",
"0.49797142"
]
| 0.74151564 | 0 |
Returns the text value of the partition type | def get_type(self):
return DOS_PARTITIONS[self.partition_type] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def partition_description(self) -> pulumi.Output[Optional[Any]]:\n return pulumi.get(self, \"partition_description\")",
"def get_partition_name(partition_type: PartitionType) -> str:\n if partition_type == PartitionType.TRAIN:\n return PARTITION_TRAIN_NAME\n \n elif partition_type == PartitionType.VALIDATION:\n return PARTITION_VALIDATION_NAME\n\n elif partition_type == PartitionType.FINETUNING:\n return PARTITION_FINETUNING_NAME\n\n elif partition_type == PartitionType.TEST:\n return PARTITION_TEST_NAME\n\n else:\n raise InvalidDatasetTypeError()",
"def _disk_type_text( self, disk_type ):\n\t\tif disk_type == 'disk':\n\t\t\treturn _( 'hard drive' )\n\t\telif disk_type == 'cdrom':\n\t\t\treturn _( 'CDROM drive' )\n\t\telif disk_type == 'floppy':\n\t\t\treturn _( 'floppy drive' )\n\t\telse:\n\t\t\treturn _('unknown')",
"def get_partitioning(disk):\n\n #TODO\n return \"Unknown\"",
"def tm_partition(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"tm_partition\")",
"def __str__(self) -> str:\n return str(self.my_partition)",
"def partType(self):\n try:\n ptype = self.partedPartition.type\n except AttributeError:\n ptype = self._partType\n\n if not self.exists and ptype is None:\n ptype = self.req_partType\n\n return ptype",
"def disk_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"disk_type\")",
"def partitionname(self) :\n\t\ttry :\n\t\t\treturn self._partitionname\n\t\texcept Exception as e:\n\t\t\traise e",
"def storage_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_type\")",
"def _getTextType(self, lineData, column):\n if lineData is None:\n return ' ' # default is code\n \n textTypeMap = lineData[1]\n if column >= len(textTypeMap): # probably, not actual data, not updated yet\n return ' '\n \n return textTypeMap[column]",
"def partition(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"partition\")",
"def value_as_text(self):\n property_name = \"_%s_as_text\" % self.attribute.type\n return getattr(self, property_name, self.value)",
"def partition(self):\n return self.tag(\"partition\")",
"def get_partfstype(self, part):\n t = self.xlist(\"get-blkinfo\", part, \"TYPE\")\n return t[1][0] if t[0] and (len(t[1]) != 0) else \"\"",
"def getText(self):\n return self.graph.get(\"__txt\", '')",
"def get_ent_type(self, line):\n\n\t\treturn str(self.kb_shm.dataType(line))",
"def typeString(self):\n return Parameter.string_dict[self._field.type]",
"def drive_type():",
"def partition_key(self) -> str:\n return pulumi.get(self, \"partition_key\")",
"def tm_partition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tm_partition\")",
"def tm_partition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tm_partition\")",
"def text(self):\n return self.__r.t.text",
"def Value(self) -> str:",
"def define_text_type(self):\n # only one text\n if len(self.text) == 1:\n text = self.text[0]\n\n # DIRECTORY\n if os.path.isdir(text):\n # retrieve files\n file_list = []\n\n # only fetch files in this folder\n for path, _, files in os.walk(text):\n if self.recursive is False:\n if path == text:\n for filename in files:\n filepath = Path(f\"{path}/{filename}\")\n file_list.append((filepath, filename))\n\n # recursively fetch all files\n else:\n for filename in files:\n filepath = Path(f\"{path}/{filename}\")\n file_list.append((filepath, filename))\n\n file_list.sort()\n self.input = file_list\n return \"file\"\n\n # SINGLE FILE\n elif os.path.isfile(text):\n filepath = Path(text)\n self.input.append((filepath, None))\n return \"file\"\n\n # STRING\n else:\n self.input.append(text)\n return \"string\"\n\n else:\n # MORE STRINGS\n self.input = self.text\n return \"string\"",
"def get_table_type(disk):\n part_type = 'Unknown'\n script = [\n 'select disk {}'.format(disk['Number']),\n 'uniqueid disk']\n\n try:\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n output = result.stdout.decode().strip()\n if REGEX_DISK_GPT.search(output):\n part_type = 'GPT'\n elif REGEX_DISK_MBR.search(output):\n part_type = 'MBR'\n elif REGEX_DISK_RAW.search(output):\n part_type = 'RAW'\n\n return part_type",
"def get_partition_type(part):\n blkid, _ = command(\n [\n 'blkid',\n '-p',\n '-o', 'udev',\n part,\n ]\n )\n saw_part_entry = False\n for line in blkid.splitlines():\n (key, value) = line.split('=')\n if key == 'ID_PART_ENTRY_TYPE':\n return value\n if key == 'ID_PART_ENTRY_SCHEME':\n table_type = value\n if key.startswith('ID_PART_ENTRY_'):\n saw_part_entry = True\n\n # hmm, is it in fact GPT?\n table_type = None\n base = get_partition_base(part)\n blkid, _ = command(\n [\n 'blkid',\n '-p',\n '-o', 'udev',\n base\n ]\n )\n for line in blkid.splitlines():\n (key, value) = line.split('=')\n if key == 'ID_PART_TABLE_TYPE':\n table_type = value\n if table_type != 'gpt':\n return None # not even GPT\n\n if saw_part_entry:\n return None # GPT, and blkid appears to be new, so we're done.\n\n # bah, fall back to sgdisk.\n if 'blkid' not in warned_about:\n LOG.warning('Old blkid does not support ID_PART_ENTRY_* fields, trying sgdisk; may not correctly identify ceph volumes with dmcrypt')\n warned_about['blkid'] = True\n (base, partnum) = split_dev_base_partnum(part)\n sgdisk, _ = command(\n [\n 'sgdisk',\n '-p',\n base,\n ]\n )\n\n for line in sgdisk.splitlines():\n m = re.search('\\s+(\\d+)\\s+\\d+\\s+\\d+\\s+\\S+ \\S+B\\s+\\S+\\s+(.*)', line)\n if m is not None:\n num = m.group(1)\n if num != partnum:\n continue\n desc = m.group(2)\n # assume unencrypted ... blkid has failed us :(\n if desc == 'ceph data':\n return OSD_UUID\n if desc == 'ceph journal':\n return JOURNAL_UUID\n\n return None",
"def guess_part_type(self, data):\n if 'administrativEnhet' in data or 'saksbehandler' in data:\n typename = 'intern'\n elif 'kontaktperson' in data \\\n or -1 != data['navn'].find(' AS'):\n typename = 'enhet'\n else:\n typename = 'person'\n return typename",
"def sample_type_str(t):\n if t == dsl.Type.NUMBER or t == dsl.Type.DIGIT:\n return get_number()\n elif t == dsl.Type.WORD:\n return get_word()\n elif t == dsl.Type.ALPHANUM or t == dsl.Type.CHAR:\n return get_alphanumeric()\n elif t == dsl.Type.ALL_CAPS:\n return get_caps()\n elif t == dsl.Type.PROP_CASE:\n return get_proper_case()\n elif t == dsl.Type.LOWER:\n return get_lower()\n else:\n raise ValueError('Unsupported type: {}'.format(t))",
"def type_as_string(self):\n return self.properties.get('TypeAsString', None)"
]
| [
"0.6335327",
"0.6087373",
"0.6078627",
"0.59970003",
"0.59606653",
"0.5928415",
"0.5862264",
"0.581425",
"0.57787377",
"0.5752302",
"0.57446164",
"0.57380545",
"0.56981283",
"0.5688516",
"0.5678651",
"0.5604231",
"0.55918556",
"0.5568496",
"0.55623966",
"0.5558864",
"0.5552106",
"0.5552106",
"0.55413747",
"0.5509593",
"0.5508727",
"0.54939395",
"0.54805005",
"0.5478874",
"0.5469563",
"0.54554045"
]
| 0.6819308 | 0 |
Returns True if this partition is bootable | def is_bootable(self):
return self.bootable_flag == 0x80 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_booted_from_volume(self, instance, disk_mapping=None):\n return not bool(instance.get('image_ref'))",
"def is_booted(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def pilotIsBootValid (self):\n return self.isBootValid()",
"def isBootValid (self):\n if not self._wasSdIdentified:\n self._log(\"is-boot-valid\").notice(\"secure-digital was not identified, its boot partition is not valid.\")\n return False\n\n if not self.isBootPartitionExist():\n self._log(\"is-boot-valid\").notice(\"the secure-digital boot partition does not exist (not valid).\")\n return False\n\n try:\n self.mountBootPartition()\n except:\n self._log(\"is-boot-valid\").exception(\"failed mounting partition, partition is invalid\")\n return False\n\n stateFile = self._getBootInstallationFilePath()\n isValid = os.path.exists(stateFile)\n if isValid:\n self._log(\"is-boot-valid\").notice(\"secure-digital boot partition's state file %s exists, the boot partitions is valid.\", stateFile)\n else:\n self._log(\"is-boot-valid\").notice(\"secure-digital boot partition's state file %s does not exist, the boot partitions is invalid.\", stateFile)\n\n return isValid",
"def CheckBoot(self, instance):\n try:\n serial_out = self.GetSerialPortOutput(instance=instance, port=1)\n self.CheckBootFailure(serial_out, instance)\n return ((self.BOOT_COMPLETED_MSG in serial_out)\n or (self.BOOT_STARTED_MSG in serial_out))\n except errors.HttpError as e:\n if e.code == 400:\n logger.debug(\"CheckBoot: Instance is not ready yet %s\", str(e))\n return False\n raise",
"def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable",
"def is_allow_select_boot_device(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsAllowSelectBootDevice', self.handle))",
"def IsBootVolumeEncrypted(self):\n uuid = self.GetPrimaryVolumeUUID()\n if not uuid:\n return False\n volumes = self._GetAPFSVolumes(uuid=uuid)\n return bool(volumes and volumes[0].get('Encryption'))",
"def UseExistingBootDisk(disks):\n return any(disk.get('boot', False) for disk in disks)",
"def is_ready(self):\n if not self.is_accessible:\n return False\n\n is_ready_cmd = '/usr/rift/bin/ssh_root {ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no stat /var/lib/cloud/instance/boot-finished > /dev/null'\n rc = subprocess.call(is_ready_cmd.format(ip=self._ip), shell=True)\n\n logger.info(\"Checking if {} is ready\".format(self._ip))\n if rc != 0:\n return False\n\n return True",
"def validateBoot (self):\n self.mountBootPartition()\n stateDictionary = self._createBootInstallationDictionary()\n self._writeDictionaryAsJson(stateDictionary, self._getBootInstallationFilePath())\n self._log(\"validate-boot\").notice(\"boot partition is validated\")",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()",
"def is_booted_storage_device(disk):\n cmdline = (\"grep -w /ahcexport /proc/mounts | cut -d ' ' -f 1 | \"\n \"sed -e 's/[0-9]*//g'\")\n if '/dev/' not in disk:\n disk = '/dev/%s' % disk\n grep_cmd = subprocess.Popen(cmdline,\n shell=True, stdout=subprocess.PIPE)\n for booted_disk in grep_cmd.stdout:\n booted_disk = booted_disk.decode(errors='ignore')\n booted_disk = booted_disk.rstrip('\\n').strip()\n if booted_disk == disk:\n return True\n return False",
"def is_active(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsActive', self.handle))",
"def probe(self):\n log_method_call(self, self.name, exists=self.exists)\n if not self.exists or not self.disklabelSupported:\n return\n\n self._size = Size(self.partedPartition.getLength(unit=\"B\"))\n self.targetSize = self._size\n\n self._partType = self.partedPartition.type\n\n self._bootable = self.getFlag(parted.PARTITION_BOOT)",
"def needs_bootstrap(self):\n return (\n not self.bootstrapped\n or (\n datetime.utcnow() - self.last_bootstrapped > timedelta(seconds=self.bootstrap_interval)\n and self.run_migrations\n )\n )",
"def is_logical(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsLogical', self.handle))",
"def is_in_use(self):\n\t\treturn bool(call_sdk_function('PrlBootDev_IsInUse', self.handle))",
"def safe_boot_disabled(self):\n return self._safe_boot_disabled",
"def is_boot_code_present(self):\n\n\t\treturn struct.unpack('<H', self.boot_sector_data[0 : 2])[0] != 0 and struct.unpack('<H', self.boot_sector_data[510 : 512])[0] == 0xAA55",
"def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)",
"def available(self):\n return self._adb_available and self._dev_emu and (self._is_root\n or self._is_su)",
"def is_partition(dev):\n dev = os.path.realpath(dev)\n if not stat.S_ISBLK(os.lstat(dev).st_mode):\n raise Error('not a block device', dev)\n\n name = get_dev_name(dev)\n if os.path.exists(os.path.join('/sys/block', name)):\n return False\n\n # make sure it is a partition of something else\n for basename in os.listdir('/sys/block'):\n if os.path.exists(os.path.join('/sys/block', basename, name)):\n return True\n\n raise Error('not a disk or partition', dev)",
"def pilotValidateBoot (self):\n return self.validateBoot()",
"def _is_boot_mode_uefi(self):\n boot_mode = self.get_current_boot_mode()\n if boot_mode == 'UEFI':\n return True\n else:\n return False",
"def available(self) -> bool:\n return (\n self._wrap_device.device.is_duct_zone_enabled(self._zone)\n and self._wrap_device.is_power_on\n )",
"def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")",
"def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)"
]
| [
"0.73547786",
"0.72385293",
"0.71716297",
"0.69085157",
"0.65144926",
"0.64562535",
"0.6442269",
"0.6433018",
"0.6403243",
"0.6394636",
"0.63540614",
"0.6322865",
"0.6322865",
"0.62819284",
"0.6267639",
"0.6255905",
"0.6250459",
"0.62429243",
"0.62306994",
"0.6191209",
"0.61136013",
"0.6091135",
"0.6071143",
"0.6024789",
"0.5999648",
"0.5991405",
"0.5962243",
"0.59483004",
"0.5927826",
"0.590988"
]
| 0.85575104 | 0 |
Returns True if the partition is an extended partition | def is_extended(self):
return 'Extended' in self.get_type() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]",
"def is_extended(self):\n return self._parent is not None",
"def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)",
"def _is_extended_slice(s):\n\n return s.step is not None and s.step != 1",
"def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")",
"def require_partition_filter(self) -> bool:\n return pulumi.get(self, \"require_partition_filter\")",
"def exist_partition(self, partition_spec):\n return partition_spec in self.partitions",
"def is_bad_partition(par):\n return 'Letter' not in par or REGEX_BAD_PARTITION.search(par['FileSystem'])",
"def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True",
"def is_partition(dev):\n dev = os.path.realpath(dev)\n if not stat.S_ISBLK(os.lstat(dev).st_mode):\n raise Error('not a block device', dev)\n\n name = get_dev_name(dev)\n if os.path.exists(os.path.join('/sys/block', name)):\n return False\n\n # make sure it is a partition of something else\n for basename in os.listdir('/sys/block'):\n if os.path.exists(os.path.join('/sys/block', basename, name)):\n return True\n\n raise Error('not a disk or partition', dev)",
"def is_extension_field(cls) -> bool:\n return cls._degree > 1",
"def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True",
"def partition(self):\n return self.tag(\"partition\")",
"def better_partition(graph, part1, part2, independent_set_extraction_strategy):\n\n # TODO: When there are more hyperplanes it often chooses the resulting partition\n # TODO: as best even though it results in more colors (e.g. for DSJC 125.5)\n\n if part2 is None or len(part2) == 0:\n return True\n\n if part1 is None or len(part1) == 0:\n return False\n\n # Remove colors from one endpoint of each illegal edge in each partition.\n nodes_to_delete1 = nodes_to_delete(graph, part1, strategy=independent_set_extraction_strategy)\n nodes_to_color1 = {n for n in graph.nodes() if n not in nodes_to_delete1}\n nr_of_colors1 = len(set(part1.values()))\n\n nodes_to_delete2 = nodes_to_delete(graph, part2, strategy=independent_set_extraction_strategy)\n nodes_to_color2 = {n for n in graph.nodes() if n not in nodes_to_delete2}\n nr_of_colors2 = len(set(part2.values()))\n\n avg1 = float(len(nodes_to_color1)) / nr_of_colors1\n avg2 = float(len(nodes_to_color2)) / nr_of_colors2\n\n return avg1 > avg2",
"def is_extending_or_extended_by(self, other):\n raise NotImplementedError()",
"def extended_acl(self) -> bool:\n return pulumi.get(self, \"extended_acl\")",
"def is_partition(graph, nodeset1, nodeset2):\n if len(nodeset1) < 1:\n return False\n if len(nodeset2) < 1:\n return False\n if(len(nodeset1.intersection(nodeset2)) > 0):\n return False\n return_list = []\n return_list2 = []\n for element in nodeset1:\n return_list.append(element)\n for element in nodeset2:\n return_list2.append(element)\n index = 0\n while index < len(return_list):\n for element in graph.get_node_neighbors(return_list[index]):\n if element not in return_list:\n return_list.append(element)\n index += 1\n index = 0\n while index < len(return_list2):\n for element in graph.get_node_neighbors(return_list2[index]):\n if element not in return_list2:\n return_list2.append(element)\n index += 1\n return len(set(return_list).intersection(set(return_list2))) < 1",
"def is_extension_activity(self, activity) -> bool:\n return self.is_start(activity) or self.is_end(activity)",
"def partition_exists(self, partitioning, partition_id):\n raise Exception(\"unimplemented\")",
"def exist_partitions(self, prefix_spec=None):\n try:\n next(self.partitions.iterate_partitions(spec=prefix_spec))\n except StopIteration:\n return False\n return True",
"def dependsOn(self, dep):\n if isinstance(dep, PartitionDevice) and dep.isExtended and \\\n self.isLogical and self.disk == dep.disk:\n return True\n\n return Device.dependsOn(self, dep)",
"def is_topic_partitions_need_update(self, topic_name, partitions):\n total_partitions = self.get_total_partitions_for_topic(topic_name)\n need_update = False\n\n if partitions != total_partitions:\n if partitions > total_partitions:\n # increasing partition number\n need_update = True\n else:\n # decreasing partition number, which is not possible\n self.close()\n self.module.fail_json(\n msg='Can\\'t update \\'%s\\' topic partition from %s to %s :'\n 'only increase is possible.' % (\n topic_name, total_partitions, partitions\n )\n )\n\n return need_update",
"def is_export_policy_inherited(self):\n return self._is_export_policy_inherited",
"def is_extended(self, value):\r\n if ((int(value) & 0xFF00) > 0):\r\n return True\r\n return False",
"def provide_partition_info(self):\n self.partition_info = True",
"def is_part_of_disk(part_device_path, disk_device_path):\n is_part_of_disk = False\n\n if disk_device_path in part_device_path:\n is_part_of_disk = True\n elif constants.DEVICE_NAME_MPATH in disk_device_path:\n path_split = disk_device_path.split(constants.DEVICE_NAME_MPATH)\n if (path_split[0] in part_device_path and\n path_split[1] in part_device_path):\n is_part_of_disk = True\n\n return is_part_of_disk",
"def partitions_are_in_order(disk_partitions, requested_partitions):\n\n partitions_nr = []\n\n for dp in disk_partitions:\n part_number = get_part_number(dp.get('device_path'))\n partitions_nr.append(int(part_number))\n\n for rp in requested_partitions:\n part_number = get_part_number(rp.get('device_path'))\n partitions_nr.append(int(part_number))\n\n return sorted(partitions_nr) == range(min(partitions_nr),\n max(partitions_nr) + 1)",
"def is_in_use(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgHddPart_IsInUse', self.handle))",
"def is_inherited(self, fld: str) -> bool:\n return self.read_inheritance(self.get_obj_label(), fld)",
"def _node_only_used_for_sym_size(node: Node, partition_nodes: List[Node]):\n if _is_sym_size_node(node):\n return True\n\n return all(\n ((user not in partition_nodes) or _is_sym_size_node(user))\n for user in node.users\n )"
]
| [
"0.7014364",
"0.63645893",
"0.63630986",
"0.636156",
"0.6255593",
"0.6198892",
"0.6158038",
"0.6097353",
"0.59970516",
"0.58780426",
"0.58487046",
"0.5628743",
"0.5519968",
"0.5459286",
"0.54182494",
"0.5402619",
"0.5378279",
"0.53673804",
"0.53528774",
"0.53508466",
"0.53190166",
"0.5301315",
"0.52947164",
"0.5292334",
"0.52835697",
"0.526851",
"0.5256451",
"0.5219314",
"0.51919377",
"0.5181238"
]
| 0.691445 | 1 |
Returns True if signature = 0xAA55 (a valid MBR signature) | def validate_signature(self):
return self.signature == 0xAA55 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def verify_signature(self, message: BasePendingMessage) -> bool:\n\n if message.signature is None:\n LOGGER.warning(\"'%s': missing signature.\", message.item_hash)\n return False\n\n try:\n signature = json.loads(message.signature)\n sigdata = base58.b58decode(signature[\"signature\"])\n public_key = base58.b58decode(signature[\"publicKey\"])\n except ValueError:\n LOGGER.warning(\"Solana signature deserialization error\")\n return False\n\n if signature.get(\"version\", 1) != 1:\n LOGGER.warning(\n \"Unsupported signature version %s\" % signature.get(\"version\")\n )\n return False\n\n if message.sender != signature[\"publicKey\"]:\n LOGGER.warning(\"Solana signature source error\")\n return False\n\n try:\n verify_key = VerifyKey(public_key)\n verification_buffer = get_verification_buffer(message)\n verif = verify_key.verify(verification_buffer, signature=sigdata)\n result = verif == verification_buffer\n except BadSignatureError:\n result = False\n except Exception:\n LOGGER.exception(\"Solana Signature verification error\")\n result = False\n\n return result",
"def verify_signature(self, sender_address: str, signature, transaction: dict) -> bool:\n try:\n public_key = serialization.load_pem_public_key(\n binascii.unhexlify(sender_address.encode('utf8')),\n backend=default_backend()\n )\n public_key.verify(\n signature,\n str(transaction).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n except:\n return False\n return True",
"def is_bip66(sig):\n\t#https://raw.githubusercontent.com/bitcoin/bips/master/bip-0066.mediawiki\n\t#0x30 [total-len] 0x02 [R-len] [R] 0x02 [S-len] [S] [sighash]\n\tsig = bytearray.fromhex(sig) if re.match('^[0-9a-fA-F]*$', sig) else bytearray(sig)\n\tif (sig[0] == 0x30) and (sig[1] == len(sig)-2):\t # check if sighash is missing\n\t\t\traise Exception(\"Sighash byte of signature is missing\") \n\t\t\t#sig.extend(b\"\\1\")\t\t\t\t\t\t \t# add SIGHASH_ALL for testing\n\t\t\t#(sig[-1] & 124 == 0) and (not not sig[-1]), \"Bad SIGHASH value\"\n\t\n\tif len(sig) < 9 or len(sig) > 73: return False\n\tif (sig[0] != 0x30): return False\n\tif (sig[1] != len(sig)-3): return False\n\trlen = sig[3]\n\tif (5+rlen >= len(sig)): return False\n\tslen = sig[5+rlen]\n\tif (rlen + slen + 7 != len(sig)): return False\n\tif (sig[2] != 0x02): return False\n\tif (rlen == 0): return False\n\tif (sig[4] & 0x80): return False\n\tif (rlen > 1 and (sig[4] == 0x00) and not (sig[5] & 0x80)): return False\n\tif (sig[4+rlen] != 0x02): return False\n\tif (slen == 0): return False\n\tif (sig[rlen+6] & 0x80): return False\n\tif (slen > 1 and (sig[6+rlen] == 0x00) and not (sig[7+rlen] & 0x80)):\n\t\treturn False\n\treturn True",
"def check_signature(signature, data):\n if SIGNATURE_DISABLED:\n return True\n\n # check signature\n try:\n digest = hmac.new(\n SEGMENT_SHARED_SECRET.encode(), msg=data, digestmod=hashlib.sha1\n ).hexdigest()\n if digest == signature:\n return True\n else:\n print(f\"Invalid signature. Expected {digest} but got {signature}\")\n except KeyError:\n pass\n\n return False",
"def is_bip66(sig):\n #https://raw.githubusercontent.com/bitcoin/bips/master/bip-0066.mediawiki\n #0x30 [total-len] 0x02 [R-len] [R] 0x02 [S-len] [S] [sighash]\n # sig = bytearray.fromhex(sig) if (isinstance(sig, string_types) and\n # RE_HEX_CHARS.match(sig)) else bytearray(sig)\n sig = bytearray.fromhex(sig)\n\n if sig[1] == len(sig)-2: \n sig.extend(b\"\\1\")# add SIGHASH for BIP66 check\n\n if len(sig) < 9 or len(sig) > 73: return False\n if (sig[0] != 0x30): return False\n if (sig[1] != len(sig)-3): return False\n rlen = sig[3]\n if (5+rlen >= len(sig)): return False\n slen = sig[5+rlen]\n if (rlen + slen + 7 != len(sig)): return False\n if (sig[2] != 0x02): return False\n if (rlen == 0): return False\n if (sig[4] & 0x80): return False\n if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)): return False\n if (sig[4+rlen] != 0x02): return False\n if (slen == 0): return False\n if (sig[rlen+6] & 0x80): return False\n if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)): return False\n \n return True",
"async def verify(self, data, signature):\n\t\tsignature_struct = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(signature)\n\t\tcalc_sig = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, signature_struct.SeqNum, data)\n\t\t#print('server signature : %s' % signature)\n\t\t#print('calculates signature: %s' % calc_sig)\n\t\treturn signature == calc_sig",
"def checksignature(self):\n if(self.name=='ORBIT'): return\n if(self.ctpnum==0): return\n cmd=\"CheckSignature(\"+self.board+\",\"+self.signature+\",\"+self.ctpnum+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print \"input checksignature: \",output\n #self.signatureM=",
"def integrity_digsig_verify(self, signature: bytes, filehash: bytes, filehash_type: str) -> bool:\n fmt = \">BB\"\n if len(signature) < struct.calcsize(fmt):\n logger.warning(\"Malformed signature: not enough bytes\")\n return False\n\n typ, version = struct.unpack(fmt, signature[: struct.calcsize(fmt)])\n if typ not in [EvmImaXattrType.EVM_IMA_XATTR_DIGSIG, EvmImaXattrType.EVM_XATTR_PORTABLE_DIGSIG]:\n logger.warning(\"Malformed signature: wrong type\")\n return False\n\n if version == 2:\n return self._asymmetric_verify(signature, filehash, filehash_type)\n\n logger.warning(\"Malformed signature: wrong version (%d)\", version)\n return False",
"def verify(signature: Signature, pub_key: rsa.RSAPublicKey, msg: bytes) -> bool:\n try:\n pub_key.verify(signature, msg, PADDING, HASH)\n except:\n return False\n return True",
"def verify(self):\n token = \"mytoken\" # set from wx server\n ll = []\n signature = self.get_argument(\"signature\", \"<none>\")\n ll.append(self.get_argument(\"timestamp\", \"<none>\"))\n ll.append(self.get_argument(\"nonce\", \"<none>\"))\n ll.append(token)\n ll.sort()\n m = hashlib.sha1()\n m.update(\"\".join(ll).encode(\"ascii\"))\n digest = m.hexdigest()\n\n if signature != digest:\n print(\"signature not match, discard this msg!\")\n return False\n else:\n print(\"signature match, got a wechat msg!\")\n return True",
"def check(self, request, consumer, token, signature):\r\n built = self.sign(request, consumer, token)\r\n return built == signature",
"def verify_signature(self, local_json: Dict) -> bool:\n return verify_signature(\n self._gateway_key,\n self._construct_verify_signature_str(local_json), local_json['signature']\n )",
"def check_hmac_signature(self, message):\n data = message[:-20]\n checksum = message[-20:]\n hmac_data = hmac.new(bytes(self.settings['hmac_key'].encode('utf-8')), bytes(data), hashlib.sha1)\n\n return True if hmac_data.digest() == checksum else False",
"def _validate_signature(self):\n signing_string = '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(\n 'Message',\n self._message_encoded,\n 'MessageId',\n self._message_id,\n 'Timestamp',\n self._timestamp,\n 'TopicArn',\n self._topic_arn,\n 'Type',\n self._type)\n\n crt = crypto.load_certificate(crypto.FILETYPE_PEM, self._pem)\n signature = base64.b64decode(self._signature)\n\n try:\n crypto.verify(\n crt,\n signature,\n signing_string.encode('utf-8'),\n 'sha1')\n except:\n self.error = 'Invalid signature.'\n raise ValueError('Invalid signature.')\n\n return True",
"def _asymmetric_verify(self, signature: bytes, filehash: bytes, filehash_type: str) -> bool:\n\n siglen = len(signature)\n\n # The data are in big endian\n fmt = \">BBBIH\"\n hdrlen = struct.calcsize(fmt)\n if len(signature) < hdrlen:\n logger.warning(\"Signature header is too short\")\n return False\n _, _, hash_algo, keyidv2, sig_size = struct.unpack(fmt, signature[:hdrlen])\n\n siglen -= hdrlen\n\n if siglen != sig_size:\n logger.warning(\"Malformed signature\")\n return False\n\n hashfunc = HASH_FUNCS.get(hash_algo)\n if not hashfunc:\n logger.warning(\"Unsupported hash algo with id '%d'\", hash_algo)\n return False\n\n if filehash_type != hashfunc().name:\n logger.warning(\n \"Mismatching filehash type %s and ima signature hash used %s\", filehash_type, hashfunc().name\n )\n return False\n\n # Try all the keyrings until we find one with a key with the given keyidv2\n pubkey = None\n for keyring in self.get_all_keyrings():\n pubkey = keyring.get_pubkey_by_keyidv2(keyidv2)\n if pubkey:\n break\n\n if not pubkey:\n logger.warning(\"No key with id 0x%08x available\", keyidv2)\n return False\n\n try:\n ImaKeyrings._verify(pubkey, signature[hdrlen:], filehash, hashfunc())\n except InvalidSignature:\n return False\n return True",
"def verify_apk_signature(self):\n verify.verify_apk_sig(self.apk_path) # raises CryptoVerificationError\n print(' - APK signature is valid')",
"def verifySignature(self, message: bytes, signature: bytes, sigAlgo: Optional[SignatureAlgorithm] = None) -> bool:\n\n if self.isRsaKey():\n v = iso9796e2.Dss1Verifier(self._pub_key)\n return v.verifySignature(message, signature)\n elif self.isEcKey():\n # WARNING: THIS SCOPE WAS TESTED WITH ECDSA SIGNATURE NOT FROM eMRTD IC\n if sigAlgo is None:\n raise ValueError(\"Missing required param 'sigAlgo'\")\n return super().verifySignature(message, signature, sigAlgo)\n else:\n raise ValueError(\"Unsupported digital signature scheme\")",
"def verify_rsa(sig_hex, message, public_key):\n sig_int = int(sig_hex , 16)\n m_int = pow(sig_int, public_key.e, public_key.n)\n m_hex = \"%0512x\" % m_int\n h = SHA.new(message).hexdigest()\n return re.match('0001f*' + ASN1_MAGIC + h, m_hex) is not None",
"def ec_verify(ec, digest, signature):\n assert len(signature) == ec_signature_length(ec)\n length = len(signature) / 2\n prefix = pack(\"!L\", length)\n try:\n return bool(ec.verify_dsa(digest, prefix + signature[:length], prefix + signature[length:]))\n except:\n return False",
"def is_valid_message(message: bytes, signature: bytes, public_key: str) -> bool:\n return _get_module_from_key(public_key).is_valid_message(\n message,\n signature,\n public_key,\n )",
"def verify(self, message, signature):\n try:\n self._pk.verify(signature.bytes, message)\n return True\n except InvalidSignature:\n return False",
"def CheckGuardSignature(self):\n signature = win32structures.LONG(0)\n lpNumberOfBytesRead = c_size_t(0)\n ret = win32functions.ReadProcessMemory(\n c_void_p(self.process),\n c_void_p(self.mem_address + self.size),\n pointer(signature), # 0x66666666\n win32structures.ULONG_PTR(4),\n byref(lpNumberOfBytesRead))\n if ret == 0:\n ActionLogger().log('Error: Failed to read guard signature: address = ' +\n hex(self.mem_address) + ', size = ' + str(self.size) +\n ', lpNumberOfBytesRead = ' + str(lpNumberOfBytesRead))\n raise WinError()\n else:\n if hex(signature.value) != '0x66666666':\n raise Exception('---------------------------------------- ' +\n 'Error: read incorrect guard signature = ' + hex(signature.value))",
"def _is_valid_message(tx_message: TransactionMessage) -> bool:\n # TODO check the hash matches the terms of the transaction, this means dm requires knowledge of how the hash is composed\n tx_hash = tx_message.signing_payload.get(\"tx_hash\")\n is_valid = isinstance(tx_hash, bytes)\n return is_valid",
"def match_signature(request_headers, request_data, secret):\n try:\n signature = request_headers.get('X-Hub-Signature').split('=', 1)[1]\n digest = hmac.new(secret, request_data, hashlib.sha1).hexdigest()\n print(\"signature: %s, digest: %s\" % (signature, digest))\n if signature == digest:\n return True\n return False\n except:\n return False",
"def check_mac_signature(request, secret, params=None):\n if params is None:\n params = parse_authz_header(request, {})\n # Any KeyError here indicates a missing parameter,\n # which implies an invalid signature.\n try:\n expected_sig = get_mac_signature(request, secret, params)\n return not strings_differ(params[\"mac\"], expected_sig)\n except KeyError:\n return False",
"def is_signature_valid(self, data, sig):\n if self.verified == False:\n return False\n\n key = self.publickey_set.filter(\n fingerprint=PublicKey.verify(data, sig).fingerprint,\n ).first()\n return key",
"def is_valid_payload(p):\n # if the checksum is valid the checksum calculation, without removing the\n # checksum, should be equal to zero\n\n if checksum16(p) == 0:\n return True\n else:\n return False",
"def is_boot_code_present(self):\n\n\t\treturn struct.unpack('<H', self.boot_sector_data[0 : 2])[0] != 0 and struct.unpack('<H', self.boot_sector_data[510 : 512])[0] == 0xAA55",
"def compare_signature(public_key: str, signature: str, content: dict) -> bool:\n\n public_key = import_key(public_key)\n verifier = PKCS1_v1_5.new(public_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n\n return verifier.verify(h, binascii.unhexlify(signature))",
"def is_valid(self, card):\n # type: (str, Card) -> bool\n if card.version == \"3.0\":\n return False\n fingerprint = self.crypto.calculate_fingerprint(\n Utils.strtobytes(card.snapshot)\n )\n fingerprint_hex = fingerprint.to_hex\n if fingerprint_hex != card.id:\n return False\n verifiers = self.verifiers.copy()\n card_public_key = self.crypto.import_public_key(card.public_key)\n verifiers[fingerprint_hex] = card_public_key\n for key in verifiers:\n if key not in card.signatures:\n return False\n is_valid = self.crypto.verify(\n fingerprint.value,\n Utils.b64tobytes(card.signatures[key]),\n verifiers[key]\n )\n if not is_valid:\n return False\n return True"
]
| [
"0.65580106",
"0.648093",
"0.64799976",
"0.6410002",
"0.64061147",
"0.6272628",
"0.6233012",
"0.62279487",
"0.6214497",
"0.6153721",
"0.6145588",
"0.6126282",
"0.61166954",
"0.6076573",
"0.59827954",
"0.59784204",
"0.5967099",
"0.59625274",
"0.5952577",
"0.5951595",
"0.59137833",
"0.59090483",
"0.5906357",
"0.5881849",
"0.5878305",
"0.5866797",
"0.5832085",
"0.58253497",
"0.5822058",
"0.5809124"
]
| 0.84841055 | 0 |
Adds partitions from extended partitions to the MBR class | def add_partitions(self, disk):
for partition in self.partitions:
if 'Extended' in partition.get_type():
with open(disk, 'rb') as hd:
hd.seek(partition.read_start)
new_mbr = Mbr(hd.read(512), lba_offset=partition.lba)
self.partitions.extend(new_mbr.partitions)
new_mbr.add_partitions(disk) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def newpart(self, device, primary, ncyls, swap=False):\n # This is a simple partitioning tool, which only supports\n # adding partitions sequentially, with all primary partitions\n # being before the extended partition, so once a logical\n # partition has been added, it is not possible to add further\n # primary ones.\n di = DiskInfo(device)\n pmax = 0 # Record highest partition number\n lim = -1 # Used for seeking last used cylinder\n exp = 0 # Number of extended partition\n ex0, ex1 = 0, -1 # Extended partition start and end\n log0, log1 = 0, -1 # Start and end of area used by logical partitions\n for p in di.parts:\n pn = int(p[0][len(device):])\n scyl, ecyl = p[1:3]\n if pn <= 4:\n if exp:\n run_error(_(\"Not supported: primary partition (%s%d)\\n\"\n \"has higher partition number than extended \"\n \"partition\") % (device, pn))\n return \"\"\n if scyl <= lim:\n run_error(_(\"Partitions must be ordered on the device.\\n\"\n \"%s%d is out of order.\") % (device, pn))\n return \"\"\n if p[3] in (\"5\", \"f\"):\n # extended\n exp = pn\n ex0, ex1 = scyl, ecyl\n continue\n pmax = pn\n lim = ecyl\n\n startcyl = lim + 1\n endcyl = lim + ncyls\n if endcyl >= di.drvcyls:\n run_error(_(\"Too little space at end of drive for new partition\"))\n return \"\"\n if exp and (pmax <= 4):\n # Remove the extended partition, which is empty anyway\n if not self.rmpart(device, exp):\n return \"\"\n pmax = exp - 1\n if primary:\n if pmax >= 4:\n run_error(_(\"Cannot add primary partition to %s\") % device)\n return \"\"\n t = \"primary\"\n else:\n t = \"logical\"\n if pmax > 4:\n # resize extended partition\n if not self.xcheck(\"resizepart\", device, str(exp),\n str(ex0), str(endcyl),\n onfail=_(\"Couldn't resize extended partition %s%d\")\n % (device, exp)):\n return False\n else:\n # create extended partition\n if not self.xcheck(\"newpart\", device,\n str(startcyl), str(endcyl), \"extended\",\n onfail=_(\"Couldn't create extended partition on %s\")\n % device):\n return False\n if pmax < 4:\n pmax = 4\n\n if self.xcheck(\"newpart\", device, str(startcyl), str(endcyl),\n t, \"linux-swap\" if swap else \"ext2\"):\n return \"%s%d\" % (device, pmax + 1)\n else:\n run_error(_(\"Couldn't add new partition to %s\") % device)\n return \"\"",
"def add_partition(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_AddPartition', self.handle))",
"def add_partitions(self, partitions):\n new_partitions = set()\n for topic, partition_ids in six.iteritems(partitions):\n new_partitions.update(set([\n \":\".join([topic, str(partition_id)])\n for partition_id in partition_ids\n ]))\n\n log.info(\n \"Attempting to add %d partitions to consumer group '%s'\",\n len(new_partitions), self.group_name\n )\n\n wait_on_event(self.connected)\n\n self.shared_set.add_items(new_partitions)",
"def add_part(self, begin, length, type, mntpnt):\n end = begin+length-1\n logging.debug(\"add_part - begin %d, length %d, end %d\" % (begin, length, end))\n for part in self.partitions:\n if (begin >= part.begin and begin <= part.end) or \\\n (end >= part.begin and end <= part.end):\n raise Exception('Partitions are overlapping')\n if begin > end:\n raise Exception('Partition\\'s last block is before its first')\n if begin < 0 or end > self.size:\n raise Exception('Partition is out of bounds. start=%d, end=%d, disksize=%d' % (begin,end,self.size))\n part = self.Partition(disk=self, begin=begin, end=end, type=str_to_type(type), mntpnt=mntpnt)\n self.partitions.append(part)\n\n # We always keep the partitions in order, so that the output from kpartx matches our understanding\n self.partitions.sort(cmp=lambda x,y: x.begin - y.begin)",
"def _add_(self, other):\n new_partition = list(self) + list(other)\n new_partition.sort(reverse=True)\n return BosonicPartition(BosonicPartitions(), new_partition)",
"def on_partition_change(self, new_partitions):\n if new_partitions is None:\n self.conn.create(self.partition_path, value=self.partitions)\n return\n\n if new_partitions != self.partitions:\n self.partitions = new_partitions\n self.rebalance()\n\n self.partitions_collected.set()",
"def on_add_clicked(self,button):\n\t\tself.list_partitions.add_partition()",
"def createPartition(self, mp, mtype, fs, size, vg, nr):\n startSector = 0\n endSector = 0\n\n # primary partition: calculate the space according instructions below\n if mtype == 'Pri':\n\n # calculate the start sector\n startSector = self.__primaryStartPoint\n\n # calculate the end sector\n sectorLen = startSector + int(size * MEGABYTE / float(self.__sectorSize))\n endSector = sectorLen - 1\n self.__primaryStartPoint = sectorLen\n\n # decrease disk size\n self.__diskSize -= size\n\n # extended partition: update primary and logical pointers\n # when a extended partition is given, its size is not taken into account\n elif mtype == 'Ext':\n\n # calculate the start sector\n startSector = self.__primaryStartPoint\n\n # calculate end sector pointer\n endSector = int(self.__diskSize * MEGABYTE / float(self.__sectorSize)) + startSector - 1\n if endSector > MAX_SECTOR_POSSIBLE:\n endSector = MAX_SECTOR_POSSIBLE\n\n self.__extEndSector = endSector\n\n # decrease disk size\n self.__diskSize -= EXTENT_SIZE - 1\n\n # logical partition: calculate the space according instructions below\n elif mtype == 'Log':\n\n # FIXME, need to improve\n # just for zkvm without extended partition\n self.__extEndSector = endSector\n # refresh start sector pointer\n startSector = self.__primaryStartPoint + self.__sectorOffset\n\n if size == ALL_AVAILABLE:\n endSector = self.__extEndSector\n size = self.__diskSize - 1\n self.__diskSize = 0\n\n else: \n # calculate end sector pointer\n sectorLen = startSector + int(size * MEGABYTE / float(self.__sectorSize))\n endSector = sectorLen - 1\n self.__primaryStartPoint = sectorLen\n\n # decrease disk size\n self.__diskSize -= size\n\n\n part = {}\n part['command'] = 'create:partition'\n part['id'] = \"%s-part%s\" % (self.__diskId, str(nr))\n part['name'] = self.__disk + str(nr)\n part['mount_point'] = mp\n part['type'] = mtype\n part['fs'] = fs\n part['multipath'] = self.__hasMultipath\n part['raid_name'] = None\n part['disk_name'] = '/dev/%s' % self.__disk\n part['size'] = size\n part['vg'] = vg\n part['nr'] = nr\n part['format'] = True\n part['start'] = startSector\n part['end'] = endSector\n\n if self.__hasMultipath:\n part['disk_name'] = '/dev/mapper/%s' % self.__disk\n\n # extended partition: do not format\n if mtype == 'Ext':\n part['format'] = False\n\n return part",
"def defineTasks(self,partition):\n recv_slots = partition.recvSlices()\n strm_slots = partition.streamSlices()\n recvNodes = partition.recvNodesFromSlots()\n strmNodes = partition.streamNodesFromSlots()\n opt = '/'+self.manager.hostName()+'/'+partition.manager.name()+'/'+partition.name+'/'\n cl0 = '/Class0'+opt\n cl1 = '/Class1'+opt\n\n partition.setDataSources([])\n tasks = []\n pn = self.partitionName()\n print '---------------------- Partition name is:',pn\n for i in xrange(len(recv_slots)):\n slot = recv_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_SND' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageSend'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setRecvSenders(tasks)\n tasks = []\n for i in xrange(len(strm_slots)):\n slot = strm_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_HLT' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageRecv'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setStreamReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in recvNodes:\n for itm in self.rcvInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setRecvInfrastructure(infra)\n partition.setRecvReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in strmNodes:\n for itm in self.strInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setStreamInfrastructure(infra)\n partition.setStreamSenders(tasks)\n if partition.saveTasks():\n tasks = partition.collectTasks(tasks={},with_data_sources=0)\n return tasks\n return None",
"def course_partitions(self):\r\n raise NotImplementedError('Subclasses must implement course_partition')",
"def test_partition_with_no_additional_constraints_extra_edge(self):\n self.setup()\n self.graph.add_edge(\n SimpleApplicationEdge(self.vert3, self.vert1), \"TEST\")\n graph, mapper = self.bp.partition(self.graph, self.machine)\n self.assertEqual(len(graph.vertices), 3)\n self.assertEqual(len(graph.edges), 4)",
"def partition_book(self):\n ...",
"def addPartition(self,partitionData):\n self.PCAs[partitionData.id] = partitionData\n self.pcaStatemachineLock[partitionData.id] = threading.Lock()\n self.StateMachineForPca[partitionData.id] = Statemachine(self.StateMachineFile,\"Unconfigured\")\n self.isPCAinTransition[partitionData.id] = False\n self.pcaSequenceNumber[partitionData.id] = 0",
"def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()",
"def update(self, metadata):\n p_metas = metadata.partitions\n\n # Remove old partitions\n removed = set(self._partitions.keys()) - set(p_metas.keys())\n if len(removed) > 0:\n log.info('Removing %d partitions', len(removed))\n for id_ in removed:\n log.debug('Removing partition %s', self._partitions[id_])\n self._partitions.pop(id_)\n\n # Add/update current partitions\n brokers = self._cluster.brokers\n if len(p_metas) > 0:\n log.info(\"Adding %d partitions\", len(p_metas))\n for id_, meta in iteritems(p_metas):\n if meta.leader not in brokers:\n raise LeaderNotAvailable()\n if meta.id not in self._partitions:\n log.debug('Adding partition %s/%s', self.name, meta.id)\n self._partitions[meta.id] = Partition(\n self, meta.id,\n brokers[meta.leader],\n [brokers[b] for b in meta.replicas],\n [brokers[b] for b in meta.isr],\n )\n else:\n self._partitions[id_].update(brokers, meta)",
"def subpartition_bsp(self, min_width, min_height):\n \n def split_horizontal(p):\n ul_x, ul_y = p.ul_pos\n \n split_pos = (random.choice(\n list(range(ul_x + min_width, ul_x + p.width - min_width + 1))), ul_y)\n \n split_x, split_y = split_pos\n \n return([Partition(p.ul_pos, split_x-ul_x, p.height), \n Partition(split_pos, ul_x + p.width - split_x, p.height)])\n \n def split_vertical(p):\n ul_x, ul_y = p.ul_pos\n \n split_pos = (ul_x, random.choice(\n list(range(ul_y + min_height, ul_y + p.height - min_height + 1))))\n \n split_x, split_y = split_pos\n \n return([Partition(p.ul_pos, p.width, split_y-ul_y), \n Partition(split_pos, p.width, ul_y + p.height - split_y)])\n \n\n \n if self.width < min_width or self.height < min_height:\n raise PartitionException(\"Partition too small!\")\n \n splith = (self.width > 2*min_width)\n splitv = (self.height > 2*min_height)\n \n new_partitions = None\n \n if splith and splitv:\n new_partitions = random.choice([\n split_horizontal, split_vertical])(self)\n \n elif splith:\n new_partitions = split_horizontal(self)\n \n elif splitv:\n new_partitions = split_vertical(self)\n \n else:\n return [self]\n \n return list(flatten([p.subpartition_bsp(min_width, min_height) \n for p in new_partitions]))",
"def allocate(self,partition, num_farms, activity, runinfo_dp):\n if self.load():\n nf = num_farms\n got = 0\n used = Online.PVSS.StringVector()\n farms = Online.PVSS.StringVector()\n dpv = Online.PVSS.DataPointVector()\n for i in xrange(len(self.inUse.data)):\n f = self.inUse.data[i]\n n = self.subfarms.data[i]\n if len(f)==0 and got<nf:\n dpv.push_back(self.dp2(self.name+'_'+n,'UsedBy'))\n dpv.back().data = partition\n dpv.push_back(self.dp2(self.name+'_'+n,'RunInfo'))\n dpv.back().data = runinfo_dp\n dpv.push_back(self.dp2(self.name+'_'+n,'Activity'))\n dpv.back().data = activity\n used.push_back(partition)\n farms.push_back(n)\n got = got + 1\n else:\n used.push_back(f)\n if got==nf:\n if len(runinfo_dp)>0:\n #dpv.push_back(self.dp2(runinfo_dp,'general.partName'))\n #dpv.back().data = partition\n #dpv.push_back(self.dp2(runinfo_dp,'HLTFarm.nSubFarms'))\n #dpv.back().data = nf\n dpv.push_back(self.dp2(runinfo_dp,'HLTFarm.subFarms'))\n dpv.back().data = farms\n self.inUse.data = used\n self.writer.add(dpv)\n self.writer.add(self.inUse)\n if self.writer.execute():\n return 'SUCCESS'\n self.error('Failed to update allocation information for partition '+\\\n partition+' in farm system:'+self.name)\n return None\n return self.error('Not enough free subfarms availible for partition '+partition+\\\n ' in farm system:'+self.name)\n return self.error('Failed to load information for partition '+partition+\\\n ' in farm system:'+self.name)",
"def load_partitions(partition_list, pickle_base_name=DEFAULT_REVIEWS_PICKLE + '.'):\n\n num_partition = 1\n result = []\n for partition in partition_list:\n print 'Reading partition %d of %d' % (num_partition, len(partition_list))\n with open(pickle_base_name + str(partition)) as file:\n loaded_element = pickle.load(file)\n result.extend(loaded_element)\n\n num_partition += 1\n\n print \"Read a total of %d partitions for a total of %d objects\" % (num_partition - 1, len(result))\n return result",
"def list_partitions(self, partitioning):\n return []",
"def __init__(self, partition, test=False, local_test_data_dir=_LOCAL_TEST_DATA_DIR):\n assert sum(partition) == 100, 'The sum of the partition list must be 100: {}'.format(partition)\n self._partition = partition\n self._test = test\n # Split the files up according to the self._partition list.\n self._partitioned_filenames = []\n filenames = data_filenames(shuffle=False, test=self._test,\n local_test_data_dir=local_test_data_dir)\n part_start = 0\n for i, part_size in enumerate(self._partition):\n part_end = part_start + int(len(filenames) * 0.01 * part_size)\n assert part_end - part_start > 0, 'The number of files in partition {} is zero.'.format(i)\n self._partitioned_filenames.append(filenames[part_start:part_end])",
"def _record_specific_partition(r_d, numnodes, cur):\n # No partitioning has been specified. Create the appropriate entries.\n if r_d['partmtd'] == 0:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partmtd = 0 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler, (i, r_d['tname']))\n\n # Range partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 1:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, '\n 'partparam2 = ?, partmtd = 1 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'][i - 1], r_d['param2'][i - 1], i,\n r_d['tname']))\n\n # Hash partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 2:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, partmtd = 2 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'], i, r_d['tname']))",
"def add_hive_partition(_):\n LOGGER.error('Add Hive Parition is not yet supported, exiting!')\n raise NotImplementedError",
"def partition_session(self):\n if self.user['drive']['name'] is not None:\n\n # Set root size\n if self.user['root_freespace'] is True:\n self.user['root_size'] = 'freespace'\n\n # Set partition parameters\n self.user['partitions'] = {'name': ['boot', 'root'],\n 'size': [self.user['boot_size'],\n self.user['root_size']],\n 'filesystem': ['fat32', 'ext4'],\n 'mountpoint': ['/mnt/boot', '/mnt'],\n 'mountorder': [1, 0]}\n\n # Set swap size and filesystem\n if 'Swap' in self.user['optional_partitions']:\n self.user['partitions']['size'].insert(1, self.user['swap_size'])\n self.user['partitions']['filesystem'].insert(1, 'swap')\n\n # Set home size and filesystem\n if 'Home' in self.user['optional_partitions']:\n if self.user['home_freespace'] is True:\n self.user['home_size'] = 'freespace'\n self.user['partitions']['size'].append(self.user['home_size'])\n self.user['partitions']['filesystem'].append('ext4')\n\n # Custom partitions\n else:\n\n # Set partition parameters\n self.user['partitions'] = {\n 'name': ['boot', 'root'],\n 'drive_id': [self.user['boot_id'].split()[0],\n self.user['root_id'].split()[0]],\n 'mountpoint': ['/mnt/boot', '/mnt'],\n 'mountorder': [1, 0]}\n\n # Set swap drive ID\n if self.user['swap_id'] is not None:\n self.user['partitions']['drive_id'].insert(\n 1, self.user['swap_id'].split()[0])\n\n # Set home drive ID\n if self.user['home_id'] is not None:\n self.user['partitions']['drive_id'].append(\n self.user['home_id'].split()[0])\n\n # Set swap parameters\n if ('Swap' in self.user['optional_partitions']) or \\\n (self.user['swap_id'] is not None):\n self.user['partitions']['name'].insert(1, 'swap')\n self.user['partitions']['mountpoint'].insert(1, 'swap')\n self.user['partitions']['mountorder'].insert(1, 2)\n\n # Set home parameters\n if 'Home' in self.user['optional_partitions'] or \\\n (self.user['home_id'] is not None):\n self.user['partitions']['name'].append('home')\n self.user['partitions']['mountpoint'].append('/mnt/home')\n self.user['partitions']['mountorder'].append(3)",
"def partitions(self, *types, new=None, disk=None) -> List[Partition]:\n types = types or (Partition,)\n return [pt for pt in self.scheme\n if all(isinstance(pt, T) for T in types)\n and (pt.disk == disk if disk is not None else True)\n and (pt.is_new == new if new is not None else True)\n ]",
"def do_configure_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_configure_partition: part: %s\", part)",
"def num_partitions(self): # -> Unknown:\n ...",
"def partitions(self):\n return self._partitions",
"def add_probability(self, partitioning, part, prob):\n self.prob.setdefault(partitioning, dict())\n self.prob[partitioning][part] = prob",
"def num_partitions(self): # -> None:\n ..."
]
| [
"0.62553537",
"0.62338024",
"0.6184228",
"0.6177974",
"0.61490935",
"0.60143113",
"0.59943926",
"0.595958",
"0.595233",
"0.574192",
"0.5720558",
"0.570915",
"0.5700588",
"0.5605133",
"0.5552451",
"0.55524147",
"0.55502284",
"0.55076903",
"0.5502424",
"0.5456309",
"0.5442365",
"0.5438803",
"0.5417105",
"0.53784716",
"0.5311923",
"0.5309789",
"0.52369356",
"0.5224165",
"0.52103525",
"0.52046776"
]
| 0.7340264 | 0 |
GZIP encode bytes object | def _gzipencode(content):
    import gzip
    from io import BytesIO
out = BytesIO()
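    # Write through GzipFile so the compressed bytes accumulate in the in-memory buffer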
f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)
f.write(content)
f.close()
return out.getvalue() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def b64_gz_json_encode(obj):\n # The |separators| argument is to densify the command line.\n return base64.b64encode(zlib.compress(\n json.dumps(obj or {}, sort_keys=True, separators=(',', ':')), 9))",
"def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw",
"def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()",
"def _gzip_str(string_):\n out = BytesIO()\n\n with gzip.GzipFile(fileobj=out, mode='w') as fo:\n fo.write(string_.encode())\n\n bytes_obj = out.getvalue()\n return bytes_obj",
"def do_gzip(fileobj):\r\n sio = cStringIO.StringIO()\r\n gzf = gzip.GzipFile(fileobj = sio, mode = \"wb\")\r\n while True:\r\n data = fileobj.read(buf_size)\r\n if not data:\r\n break\r\n gzf.write(data)\r\n gzf.close()\r\n return sio",
"def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()",
"def write_gzip_bytes(self, bytes, compresslevel=5):\n sio = StringIO()\n with gzip.GzipFile(None, 'w', compresslevel, sio) as gz:\n gz.write(bytes)\n with self.open('wb') as f:\n f.write(sio.getvalue())",
"def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content",
"def gzdeflate():\n return zlib.compress(val)",
"def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = cStringIO.StringIO()\r\n zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()",
"def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")",
"def read_gzip_bytes(self):\n with gzip.open(self, 'rb') as f:\n return f.read()",
"def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)",
"def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s",
"def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()",
"def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()",
"def compression(s):",
"def save_to_gzip(data,fname):\n with gzip.open(fname + '.gz', 'wb',compresslevel = 9) as f:\n f.write(data.tobytes())",
"def convert_gz_json_type(value):\n return json.loads(zlib.decompress(base64.b64decode(value)))",
"def gzinflate(val):\n return zlib.decompress(val)",
"def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x",
"def data_zip(self, data):\n stringio = StringIO.StringIO()\n gzip_file = gzip.GzipFile(fileobj=stringio, mode='wb')\n gzip_file.write(data)\n gzip_file.close()\n return stringio.getvalue()",
"def compress(bstr):\n from sphobjinv.re import pb_comments, pb_data\n\n # Preconvert any DOS newlines to Unix\n s = bstr.replace(b\"\\r\\n\", b\"\\n\")\n\n # Pull all of the lines\n m_comments = pb_comments.findall(s)\n m_data = pb_data.finditer(s)\n\n # Assemble the binary header comments and data\n # Comments and data blocks must end in newlines\n hb = b\"\\n\".join(m_comments) + b\"\\n\"\n db = b\"\\n\".join(_.group(0) for _ in m_data) + b\"\\n\"\n\n # Compress the data block\n # Compression level nine is to match that specified in\n # sphinx html builder:\n # https://github.com/sphinx-doc/sphinx/blob/1.4.1/sphinx/\n # builders/html.py#L843\n dbc = zlib.compress(db, 9)\n\n # Return the composited bytestring\n return hb + dbc",
"def compress(self, s):\n data = zlib.compress(s)\n # drop gzip headers and tail\n return data[2:-4]",
"def gunzip(data):\n return gzip.GzipFile(fileobj=StringIO(data)).read()",
"def ToBytes (self):\n return zlib.compress (json.dumps (self.containments, 2).encode ('utf-8'), 9)",
"def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)",
"def decompress_gzip(in_str):\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s",
"def decompress_gzip(in_str):\n import gzip\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s",
"def bencode_buffer(data):\n\twith BytesIO() as f:\n\t\tbencode(data, f)\n\t\treturn f.getvalue()"
]
| [
"0.7058347",
"0.7038002",
"0.70237434",
"0.69651395",
"0.6945722",
"0.6864719",
"0.6820687",
"0.67059284",
"0.6684357",
"0.66502655",
"0.6574162",
"0.6489864",
"0.64450616",
"0.6441168",
"0.64198315",
"0.64198315",
"0.636657",
"0.6334356",
"0.6240513",
"0.62067103",
"0.61713606",
"0.613792",
"0.6030444",
"0.60174066",
"0.60035086",
"0.6000922",
"0.59809625",
"0.597874",
"0.5964414",
"0.59443414"
]
| 0.7544876 | 0 |
Try to parse 'item' (string or integer) to enum 'type' | def _parse_enum(type, item):
try:
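        # Fast path: look up the enum member by name (e.g. MyEnum["FOO"])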
return type[item]
    except Exception:
return type(item) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convertToEnumItem(byte: int, enumType: cern.japc.value.EnumType) -> cern.japc.value.EnumItem:\n ...",
"def _get_item_type(item_dict):\n\n if 'object' in item_dict:\n item_type = 'object'\n elif 'module' in item_dict:\n item_type = 'module'\n elif 'clock' in item_dict:\n item_type = 'clock'\n elif '#include' in item_dict:\n item_type = 'include'\n elif '#set' in item_dict:\n item_type = 'set'\n elif '#define' in item_dict:\n item_type = 'define'\n elif 'omftype' in item_dict:\n item_type = 'omftype'\n elif 'class' in item_dict:\n item_type = 'class'\n else:\n raise TypeError('Unknown type! Item: {}'.format(item_dict))\n\n return item_type",
"def _type(self, item):\n return self.cv.type(item)",
"def defaultItemType(item):\n if 'choices' in item:\n itemtype = 'select'\n else:\n itemtype = type(item['value'])\n if itemtype is None:\n itemtype = str\n return itemtype",
"def from_str(type_string):\n\t\tglobal type_enum\n\t\tif type_string == \"V\":\n\t\t\treturn MoviesType.V\n\t\telif type_string == \"VG\":\n\t\t\treturn MoviesType.VG\n\t\telif type_string == \"TV\":\n\t\t\treturn MoviesType.TV\n\t\telse:\n\t\t\treturn MoviesType.M",
"def get_indicator_type(indicator_type, item):\n\n if indicator_type == 'ip':\n return ip_to_indicator_type(item.get('Name'))\n elif indicator_type == 'hash':\n return FeedIndicatorType.File\n elif indicator_type == 'domain':\n # If * is in the domain it is of type DomainGlob\n if '*' in item.get('Name', ''):\n return FeedIndicatorType.DomainGlob\n return FeedIndicatorType.Domain\n elif indicator_type == 'url':\n return FeedIndicatorType.URL\n elif indicator_type == 'vulnerability':\n return FeedIndicatorType.CVE",
"def _validate_enum(self, item: Any, enum: Any) -> Any:\n if item is None:\n result = get_random_item(enum, self.random)\n elif item and isinstance(item, enum):\n result = item\n else:\n raise NonEnumerableError(enum)\n\n return result.value",
"def GetItemType(self, item):\r\n\r\n return item.GetType()",
"def validate_item(self, form_item, type_):\n if form_item == \"\":\n return None\n else:\n try:\n return type_(form_item)\n except TypeError:\n return None",
"def translate_item_type(resolved_type):\n\n # Another model\n if resolved_type.container == ContainerType.MODEL:\n return resolved_type.type\n\n # Primitive type\n if resolved_type.container == ContainerType.PRIMITIVE:\n return PRIMITIVE_TYPES[resolved_type.type]\n\n # Something more complex\n return translate_type(resolved_type)",
"def isItem(obType,iType):\n if iType == 'sword':\n return isinstance(obType,Sword)\n elif iType == 'axe':\n return isinstance(obType,Axe)\n elif iType == 'shield':\n return isinstance(obType,Shield)\n elif iType == 'helmet':\n return isinstance(obType,Helmet)\n else:\n pass\n # raise SystemError('Bad item type {} in isItem'.format(iType))",
"def convertToEnumItem(boolean: bool) -> cern.japc.value.EnumItem:\n ...",
"def process_item(item):\n if isinstance(item, ast.Slice):\n name = unwrap(item.lower)\n value = item.upper\n return name, value\n\n # due to ast.Index going away in 3.9, simple indices are just the value\n # themselves.\n if isinstance(item, ast.Name):\n return None, item\n\n if isinstance(item, ast.Index):\n return None, item.value\n\n raise TypeError(f\"{type(item)} type not handled\")",
"def _parse(self, val: str):\n if val is None:\n return val\n\n if self._enum_class and isinstance(val, self._enum_class):\n return val # Directly return the enum value if it is the enum.\n\n if val not in self._str2enum:\n msg = \"Not a valid enum value: '{}', valid values: {}\"\n raise ValidationException(\n message=msg.format(val, \", \".join(self.enum)),\n no_personal_data_message=msg.format(\"[val]\", \"[enum]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n error_type=ValidationErrorType.INVALID_VALUE,\n )\n return self._str2enum[val]",
"def _parse(self, str_val: str):\n if str_val is None:\n return str_val\n\n if self._enum_class and isinstance(str_val, self._enum_class):\n return str_val # Directly return the enum value if it is the enum.\n\n if str_val not in self._str2enum:\n msg = \"Not a valid enum value: '{}', valid values: {}\"\n raise ValidationException(\n message=msg.format(str_val, \", \".join(self.enum)),\n no_personal_data_message=msg.format(\"[val]\", \"[enum]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return self._str2enum[str_val]",
"def on_enum_parse(self, ctx):\n return None",
"def parse(\n cls,\n value: str\n ):\n\n if value is None or len(value) == 0:\n raise ValueError(\"provided value may not be None or empty\")\n\n for item in cls:\n if value == item.value:\n # found a matching value\n return item\n\n # Fallback value in case the API adds an enum that is not supported\n # by an older version of the SDK\n return cls.Unknown",
"def _check_type(item, types, item_name=None):\n check_types = sum(\n (\n (type(None),)\n if type_ is None\n else (type_,)\n if not isinstance(type_, str)\n else _types[type_]\n for type_ in types\n ),\n (),\n )\n\n if not isinstance(item, check_types):\n type_name = [\n \"None\"\n if cls_ is None\n else cls_.__name__\n if not isinstance(cls_, str)\n else cls_\n for cls_ in types\n ]\n if len(type_name) == 1:\n type_name = type_name[0]\n elif len(type_name) == 2:\n type_name = \" or \".join(type_name)\n else:\n type_name[-1] = \"or \" + type_name[-1]\n type_name = \", \".join(type_name)\n item_name = \"Item\" if item_name is None else \"'%s'\" % item_name\n raise TypeError(\n f\"{item_name} must be an instance of {type_name}, \"\n f\"got {type(item)} instead.\"\n )\n\n return item",
"def text_to_field_type(t):\n\n if t == \"MISS\":\n return BattleShipBoard.MISSED\n \n return BattleShipBoard.HIT",
"def item_to_type_and_shape(item: ValueType) -> Tuple[str, List]:\n item = np.array(item)\n\n shape = list(item.shape)\n if item.dtype == np.bool_:\n return 'int', shape\n item = item.flatten()\n if np.issubdtype(item.dtype, np.integer):\n return 'int', shape\n elif np.issubdtype(item.dtype, np.floating):\n return 'float', shape\n elif is_string(item):\n return 'string', check_and_image_shape(item, shape)\n else:\n raise ValueError(f'Unsupported value: {item}.')",
"def parse_item(raw_item: str) -> Item:\n name, cost, damage, armor = raw_item.split()\n\n return Item(name, int(cost), int(damage), int(armor))",
"def test_enum_datatypes(self) -> None:\n directory = os.path.join(PAYLOAD_DIRECTORY, 'enum_payloads')\n type_name = 'NULL'\n\n def get_id(type_: Dict[str, str]) -> int:\n \"\"\"A helper function to improve test case readability.\"\"\"\n return int(type_[f'{type_name}_id'])\n\n # ArmorFacing\n filepath = os.path.join(directory, 'armor_facing.json')\n type_name = 'armor_facing'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.ArmourFacing.FRONT)\n self.assertEqual(get_id(type_list[1]), ps2.ArmourFacing.RIGHT)\n self.assertEqual(get_id(type_list[2]), ps2.ArmourFacing.TOP)\n self.assertEqual(get_id(type_list[3]), ps2.ArmourFacing.REAR)\n self.assertEqual(get_id(type_list[4]), ps2.ArmourFacing.LEFT)\n self.assertEqual(get_id(type_list[5]), ps2.ArmourFacing.BOTTOM)\n self.assertEqual(get_id(type_list[6]), ps2.ArmourFacing.ALL)\n\n # FireModeType\n filepath = os.path.join(directory, 'fire_mode_type.json')\n type_name = 'fire_mode_type'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.FireModeType.PROJECTILE)\n self.assertEqual(get_id(type_list[1]), ps2.FireModeType.IRON_SIGHT)\n self.assertEqual(get_id(type_list[2]), ps2.FireModeType.MELEE)\n self.assertEqual(get_id(type_list[3]),\n ps2.FireModeType.TRIGGER_ITEM_ABILITY)\n self.assertEqual(get_id(type_list[4]), ps2.FireModeType.THROWN)\n\n # MetagameEventState\n filepath = os.path.join(directory, 'metagame_event_state.json')\n type_name = 'metagame_event_state'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.MetagameEventState.STARTED)\n self.assertEqual(get_id(type_list[1]),\n ps2.MetagameEventState.RESTARTED)\n self.assertEqual(get_id(type_list[2]),\n ps2.MetagameEventState.CANCELLED)\n self.assertEqual(get_id(type_list[3]), ps2.MetagameEventState.ENDED)\n self.assertEqual(get_id(type_list[4]),\n ps2.MetagameEventState.XP_BONUS_CHANGED)\n\n # TargetType\n filepath = os.path.join(directory, 'target_type.json')\n type_name = 'target_type'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.TargetType.SELF)\n self.assertEqual(get_id(type_list[1]), ps2.TargetType.ANY)\n self.assertEqual(get_id(type_list[2]), ps2.TargetType.ENEMY)\n self.assertEqual(get_id(type_list[3]), ps2.TargetType.ALLY)",
"def _process_type(self):\n _type = self._transform_value(self.transform.type)\n if _type is not None:\n self.transformed_item['type'] = _type\n else:\n self.log.error(\n 'feature=ti-transform, action=process-type, error=invalid=type, '\n f'path={self.transform.type.path}, value={_type}'\n )\n raise RuntimeError('Invalid type')",
"def item_type(self):\n pass",
"def itemByType(self, itemType):\n if itemType.value not in self.__inventory__:\n return None\n if len(self.__inventory__[itemType.value]) == 0:\n return None\n return self.__inventory__[itemType.value][0]",
"def parse_fieldtype(value, fieldtype):\n\ttype_mapper = {\n\t\t\"int\": int,\n\t\t\"float\": float,\n\t\t\"basestring\": str,\n\t\t\"dict\": json.loads\n\t}\n\n\ttry:\n\t\tif fieldtype in type_mapper.keys():\n\t\t\treturn type_mapper[fieldtype](value)\n\t\telif fieldtype == \"list\":\n\t\t\traise Exception(\"Can't parse value to list type\")\n\t\telif fieldtype == \"date\":\n\t\t\treturn value\n\t\t# elif fieldtype == \"float\":\n\t\t# \treturn float(value)\n\t\t# elif fieldtype == \"basestring\":\n\t\t# \treturn str(value)\n\t\t# elif fieldtype == \"dict\":\n\t\t# \treturn json.loads(value)\n\texcept Exception, e:\n\t\traise e",
"def get_item(self, item_type):\n if item_type not in self._internal_type_mapping:\n return None\n else:\n return self._internal_type_mapping[item_type]",
"def get_xsd_type(self, item):\n if not self.xsd_types or isinstance(self.xsd_types, AbstractSchemaProxy):\n return\n elif isinstance(item, str):\n xsd_type = self.xsd_types.get(item)\n elif isinstance(item, AttributeNode):\n xsd_type = self.xsd_types.get(item[0])\n else:\n xsd_type = self.xsd_types.get(item.tag)\n\n if not xsd_type:\n return\n elif not isinstance(xsd_type, list):\n return xsd_type\n elif isinstance(item, AttributeNode):\n for x in xsd_type:\n if x.is_valid(item[1]):\n return x\n elif not isinstance(item, str):\n for x in xsd_type:\n if x.is_simple():\n if x.is_valid(item.text):\n return x\n elif x.is_valid(item):\n return x\n\n return xsd_type[0]",
"def from_string(cls, name: str) -> Enum:",
"def SetItemType(self, item, ct_type):\r\n\r\n item.SetType(ct_type)\r\n self.CalculatePositions()\r\n self.Refresh()"
]
| [
"0.67821544",
"0.66704196",
"0.66600126",
"0.6387634",
"0.63724065",
"0.6335099",
"0.63331234",
"0.62657595",
"0.62563825",
"0.6107302",
"0.60311896",
"0.5965388",
"0.58617616",
"0.58300763",
"0.58221483",
"0.5804898",
"0.5765457",
"0.57585025",
"0.5724611",
"0.5699114",
"0.56637126",
"0.5651772",
"0.5619819",
"0.56180376",
"0.5616063",
"0.56110317",
"0.55814373",
"0.55767196",
"0.5574493",
"0.55043167"
]
| 0.86925566 | 0 |
small script to populate relics for testing | def test_relic():
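    # Connect to the default local MongoDB and rebuild the roguesim_python test data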
mongo_db = pymongo.MongoClient()
init_db(mongo_db.roguesim_python)
populate_db(mongo_db.roguesim_python) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def populate_db(dbase):\n # In this order: Iron, Blood, Shadow, Fel, Storm\n wowhead_ids = []\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-8))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-9))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-10))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-11))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-17))\n item_ids = set(wowhead_ids)\n print(item_ids)\n\n pos = 0\n for item_id in item_ids:\n if pos % 10 == 0:\n print(\"Relic %d of %d\" % (pos, len(item_ids)))\n pos += 1\n import_relic(dbase, item_id)",
"def test_ipam_rirs_list(self):\n pass",
"def _regr_basic():",
"def reindex(self):",
"def reindex(self):",
"def import_relic(dbase, item_id):\n try:\n query = {'remote_id': item_id}\n results = dbase.relics.find(query)\n if results.count() != 0:\n print(\"import_relic: already have relic %d\" % item_id)\n return\n\n print(\"importing relic %d\" % item_id)\n\n relic = ArmoryRelic.get(item_id)\n entry = {\n 'remote_id': item_id,\n 'type': relic['type'],\n 'traits': relic['traits']\n }\n dbase.relics.replace_one(\n {'remote_id': item_id, 'type': relic['type']},\n entry,\n upsert=True\n )\n except ArmoryDocument.ArmoryError as err:\n print(\"import_relic: failed to fetch %d: %s\" % (item_id, err))\n return",
"def test_backup_restore_with_ops(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n initial_gen = copy.deepcopy(gen)\n initial_keys = []\n for x in initial_gen:\n initial_keys.append(x[0])\n self.log.info(\"Start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.log.info(\"Create backup repo \")\n self.backup_create()\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n\n if self.compact_backup and self.ops_type == \"delete\":\n self.log.info(\"Start to compact backup \")\n self.backup_compact_validate()\n self.log.info(\"Validate deleted keys\")\n self.backup_compact_deleted_keys_validation(initial_keys)\n\n self.log.info(\"start restore cluster \")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n self.backupset.start = start\n self.backupset.end = end\n self._backup_restore_with_ops(backup=False, compare_function=\">=\")\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"",
"def test_get_random_recipes(self):\n pass",
"def update_set_inventories(check_update=1):\n syt.log_info(\"$$$ Adding RE inventories to database\")\n set_inventories = list(reapi.pull_all_set_parts())\n last_updated = info.read_inv_update_date('last_inv_updated_re')\n set_inv = info.read_re_invs()\n\n sets = info.read_bl_set_num_id()\n parts = info.read_re_parts()\n parts.update(info.read_bl_parts()) # Add bl parts in there just in case\n colors = info.read_re_colors()\n\n timer = syt.process_timer(name=\"Add Re Inventories\")\n\n syt.log_info(\"Running Rebrickable Update\")\n\n sets_to_skip = []\n rows_to_scrape = []\n parts_to_insert = []\n pool = _pool(syt.RUNNINGPOOL)\n for idx, row in enumerate(set_inventories):\n if row[0] == 'set_id': continue\n if row[0] in sets_to_skip: continue\n if row[0] in set_inv:\n if check_update == 0 or not syt.old_data(last_updated[row[0]]):\n sets_to_skip.append(row[0])\n continue\n # print(\"2222 {} | {} SET {}\".format(idx, len(parts_to_insert), row[0]))\n rows_to_scrape.append(row)\n\n # Get pieces\n if idx > 0 and idx % (syt.RUNNINGPOOL * 10) == 0:\n syt.log_info(\"@@@ Scraping {} rows\".format(len(rows_to_scrape)))\n _process_data = partial(_process_data_for_inv_db, sets=sets, parts=parts, colors=colors)\n parts_to_insert.extend(pool.map(_process_data, rows_to_scrape))\n # print(\"$[{}]\".format(len(rows_to_scrape)))\n rows_to_scrape = []\n sleep(0.01)\n\n #Insert data\n if idx > 0 and len(parts_to_insert) >= (syt.RUNNINGPOOL * 30):\n parts_to_insert = list(filter(None, parts_to_insert))\n syt.log_info(\"@@@ Inserting rows >[{}]\".format(len(parts_to_insert)))\n _add_re_inventories_to_database(parts_to_insert)\n timer.log_time(300, len(set_inventories) - idx)\n parts_to_insert = []\n\n _add_re_inventories_to_database(parts_to_insert)\n timer.log_time(len(parts_to_insert))\n timer.end()\n\n pool.close()\n pool.join()\n syt.log_info(\"%%% Finished RE inventories to database\")",
"def __init__(self):\n self.recipeset = {}\n self.hardcoded()",
"def mock_repodata(repodata, case):\n if 'repodata' in case:\n data = deepcopy(repodata)\n dict_merge(data, case['repodata'])\n else:\n data = repodata\n\n dataframe = pd.DataFrame(columns=utils.RepoData.columns)\n for channel, packages in data.items():\n for name, versions in packages.items():\n for item in versions:\n pkg = {\n 'channel': channel,\n 'name': name,\n 'build': '',\n 'build_number': 0,\n 'version': 0,\n 'depends': [],\n 'subdir': '',\n 'platform': 'noarch',\n }\n pkg.update(item)\n dataframe = dataframe.append(pkg, ignore_index=True)\n\n backup = utils.RepoData()._df, utils.RepoData()._df_ts\n utils.RepoData()._df = dataframe\n utils.RepoData()._df_ts = datetime.datetime.now()\n yield\n utils.RepoData()._df, utils.RepoData()._df_ts = backup",
"def rebuild_items(self, identifiers):\n raise NotImplementedError",
"def _create_slice(arr, id, reference_name, slice_start, slice_end):\n url = f\"http://{request.host}{BASE_PATH}/data?id={id}&reference_name={reference_name}&start={slice_start}&end={slice_end}\"\n arr.append({ 'url': url, })",
"def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)",
"def create_subsets(subsets):\n index = 0\n docs = []\n for name,_,stype in subsets:\n record = {\n \"_id\": name,\n \"type\": stype\n }\n docs.append(record)\n index+=1\n return docs",
"def test_get_recipe_information_bulk(self):\n pass",
"def _build_ID_sets(self):\n raise NotImplementedError",
"def test_example_12():\n\timport pandas as pd\n\tfrom tcrdist.repertoire import TCRrep\n\timport numpy as np\n\n\tdf = pd.read_csv(\"dash.csv\").head(100)\n\ttr = TCRrep(cell_df = df,\n\t\torganism = 'mouse',\n\t\tchains = ['alpha','beta'],\n\t\tdb_file = 'alphabeta_gammadelta_db.tsv',\n\t\tstore_all_cdr=False,\n\t\tarchive_result=True,\n\t\tarchive_name = \"example_archive\")\n\n\ttr2 = TCRrep(cell_df = None,\n\t\torganism = 'mouse',\n\t\tchains = ['alpha','beta'],\n\t\tdb_file = 'alphabeta_gammadelta_db.tsv',\n\t\tblank = True,\n\t\tarchive_name = \"example_archive\")\n\ttr2.rebuild()\n\n\t# Check that all atrributes are the same after rebuild, except metrics which can't be zipped\n\tfor k in tr2.__dict__.keys():\n\t\tprint(k)\n\t\tif k in ['all_genes','metrics_a','metrics_b','metrics_d', 'metrics_g',\n\t\t\t\t'kargs_a','kargs_b','kargs_d','kargs_g']:\n\t\t\tpass\n\t\telse:\n\t\t\tassert np.all(getattr(tr, k) == getattr(tr2, k) )\n\n\tfor k in ['all_genes','metrics_a','metrics_b', 'kargs_a','kargs_b']:\n\t\tassert isinstance(getattr(tr2, k), dict)\n\t\tassert set(getattr(tr, k).keys()) - set(getattr(tr2, k).keys()) == set()",
"def preservation_derivatives(scope=\"package\"):\n app = create_app(\"test\")\n with app.app_context():\n db.create_all()\n\n storage_service = test_helpers.create_test_storage_service(\n name=STORAGE_SERVICE_NAME\n )\n storage_location = test_helpers.create_test_storage_location(\n storage_service_id=storage_service.id\n )\n _ = test_helpers.create_test_pipeline(storage_service_id=storage_service.id)\n fetch_job = test_helpers.create_test_fetch_job(\n storage_service_id=storage_service.id\n )\n\n aip1 = test_helpers.create_test_aip(\n uuid=AIP_1_UUID,\n transfer_name=AIP_1_NAME,\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n aip2 = test_helpers.create_test_aip(\n uuid=AIP_2_UUID,\n transfer_name=AIP_2_NAME,\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n\n original_file1 = test_helpers.create_test_file(\n file_type=FileType.original,\n name=ORIGINAL_FILE_1_NAME,\n uuid=ORIGINAL_FILE_1_UUID,\n size=ORIGINAL_FILE_SIZE,\n puid=JPEG_1_01_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_01_FORMAT_VERSION,\n aip_id=aip1.id,\n )\n original_file2 = test_helpers.create_test_file(\n file_type=FileType.original,\n name=ORIGINAL_FILE_2_NAME,\n uuid=ORIGINAL_FILE_2_UUID,\n size=ORIGINAL_FILE_SIZE,\n puid=JPEG_1_02_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_02_FORMAT_VERSION,\n aip_id=aip2.id,\n )\n\n _ = test_helpers.create_test_file(\n file_type=FileType.preservation,\n name=PRESERVATION_FILE_1_NAME,\n uuid=PRESERVATION_FILE_1_UUID,\n size=PRESERVATION_FILE_SIZE,\n puid=TIFF_PUID,\n file_format=TIFF_FILE_FORMAT,\n original_file_id=original_file1.id,\n aip_id=aip1.id,\n )\n _ = test_helpers.create_test_file(\n file_type=FileType.preservation,\n name=PRESERVATION_FILE_2_NAME,\n uuid=PRESERVATION_FILE_2_UUID,\n size=PRESERVATION_FILE_SIZE,\n puid=TIFF_PUID,\n file_format=TIFF_FILE_FORMAT,\n original_file_id=original_file2.id,\n aip_id=aip2.id,\n )\n\n yield app\n\n db.drop_all()",
"def test_final_repset_from_iteration_repsets(self):\r\n repset1 = \"\"\">o1\r\nACCGT\r\n>o2\r\nAGG\r\n>o3\r\nACCGTT\"\"\".split('\\n')\r\n\r\n repset2 = \"\"\">o4\r\nTACCGT\r\n>o5\r\nTAGG\r\n>o6\r\nTACCGTT\"\"\".split('\\n')\r\n\r\n repset3 = \"\"\">o4\r\nCAT\r\n>o7\r\nAAAA\r\n>o1\r\nA\"\"\".split('\\n')\r\n\r\n exp = [(\"o1\", \"ACCGT\"), (\"o2\", \"AGG\"), (\"o3\", \"ACCGTT\"),\r\n (\"o4\", \"TACCGT\"), (\"o5\", \"TAGG\"), (\"o6\", \"TACCGTT\")]\r\n actual = list(final_repset_from_iteration_repsets([repset1, repset2]))\r\n self.assertEqual(actual, exp)\r\n\r\n exp = [(\"o1\", \"ACCGT\"), (\"o2\", \"AGG\"), (\"o3\", \"ACCGTT\"),\r\n (\"o4\", \"TACCGT\"), (\"o5\", \"TAGG\"), (\"o6\", \"TACCGTT\"), ('o7', 'AAAA')]\r\n actual = list(\r\n final_repset_from_iteration_repsets([repset1, repset2, repset3]))\r\n self.assertEqual(actual, exp)",
"def make_slice_gromacs(**kwargs):\n\tspec_in = kwargs.get('spec',None)\n\tif not spec_in: raise Exception('send slice details in a dict called \"spec\"')\n\treq_keys = 'start end skip group'.split()\n\tmissing_keys = [k for k in req_keys if k not in spec_in]\n\tif any(missing_keys): \n\t\traise Exception('slice maker for GROMACS is missing items in kwargs[\\'specs\\']: %s'%missing_keys)\n\t#---prepare specification for the slicer\n\tspec = dict([(k,spec_in[k]) for k in req_keys])\n\t#---get the PBC\n\tspec['pbc'] = spec_in.get('pbc',None)\n\t#---sequence uses the EDR files to figure out which parts we need to slice\n\tspec['sequence'] = kwargs['sequence']\n\tsn_prefixed = kwargs['sn_prefixed']\n\t#---name the slices\n\tpbc_suffix = '' if not spec['pbc'] else '.pbc%s'%spec['pbc']\n\tspec['outkey'] = '%s.%d-%d-%d.%s%s'%(\n\t\tsn_prefixed,spec['start'],spec['end'],spec['skip'],spec['group'],pbc_suffix)\n\tspec['postdir'] = kwargs['postdir']\n\tspec['tpr_keyfinder'] = kwargs['tpr_keyfinder']\n\tspec['traj_keyfinder'] = kwargs['traj_keyfinder']\n\t#---create the group\n\tif spec_in['group']:\n\t\tif spec_in['group']!=kwargs['group_name']:\n\t\t\traise Exception('group_name %s does not match the slice group %s'%(\n\t\t\t\tspec_in['group'],kwargs['group_name']))\n\t\tspec_group = dict(sn=kwargs['sn'],group=spec_in['group'],\n\t\t\tselect=kwargs['group_selection'],simkey=spec['outkey'])\n\t\t#import ipdb;ipdb.set_trace()\n\t\t#---get the latest starting structure\n\t\t#spec['tpr_keyfinder']('EGFR_active_L747P_MD_2', ('s', '01', 'protein'), '0001')\n\t\tgroup_fn = create_group(postdir=kwargs['postdir'],structure=kwargs['last_structure'],**spec_group)\n\t\tspec['group_fn'] = group_fn\n\t#---call the slice maker\n\tslice_trajectory(**spec)\n\t#---return the name for storage in the postdat\n\treturn spec['outkey']",
"def test_ipam_rirs_create(self):\n pass",
"def test_patch_collection(self):\n pass",
"def test_subset_reconstruction_iterable(self, wires):\n circuit = hadamard_circuit(wires)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n # choose 1000 random indices\n snapshots = np.random.choice(np.arange(10000, dtype=np.int64), size=1000, replace=False)\n state = shadow.global_snapshots(snapshots=snapshots)\n assert state.shape == (len(snapshots), 2**wires, 2**wires)\n\n # check the results against obtaining the full global snapshots\n expected = shadow.global_snapshots()\n for i, t in enumerate(snapshots):\n assert np.allclose(expected[t], state[i])",
"def __get_collection_load_spec(self, doc_ttl=0):\n d_level = Bucket.DurabilityLevel.NONE\n if self.num_replicas != Bucket.ReplicaNum.THREE:\n random.seed(round(time()*1000))\n # Since durability is not supported with replicas=3\n d_level = choice([\n Bucket.DurabilityLevel.NONE,\n Bucket.DurabilityLevel.MAJORITY,\n Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,\n Bucket.DurabilityLevel.PERSIST_TO_MAJORITY])\n return {\n # Scope/Collection ops params\n MetaCrudParams.COLLECTIONS_TO_DROP: 3,\n\n MetaCrudParams.SCOPES_TO_DROP: 1,\n MetaCrudParams.SCOPES_TO_ADD_PER_BUCKET: 3,\n MetaCrudParams.COLLECTIONS_TO_ADD_FOR_NEW_SCOPES: 5,\n\n MetaCrudParams.COLLECTIONS_TO_ADD_PER_BUCKET: 10,\n\n MetaCrudParams.BUCKET_CONSIDERED_FOR_OPS: \"all\",\n MetaCrudParams.SCOPES_CONSIDERED_FOR_OPS: \"all\",\n MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_OPS: \"all\",\n\n # Doc loading params\n \"doc_crud\": {\n MetaCrudParams.DocCrud.COMMON_DOC_KEY: \"test_collections\",\n\n MetaCrudParams.DocCrud.NUM_ITEMS_FOR_NEW_COLLECTIONS: 5000,\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION: 20,\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION: 10,\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION: 10,\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION: 10,\n },\n\n # Doc_loading task options\n MetaCrudParams.DOC_TTL: doc_ttl,\n MetaCrudParams.DURABILITY_LEVEL: d_level,\n MetaCrudParams.SKIP_READ_ON_ERROR: True,\n MetaCrudParams.SUPPRESS_ERROR_TABLE: False,\n # The below is to skip populating success dictionary for reads\n MetaCrudParams.SKIP_READ_SUCCESS_RESULTS: True,\n\n MetaCrudParams.RETRY_EXCEPTIONS: [],\n MetaCrudParams.IGNORE_EXCEPTIONS: [],\n MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD: \"all\",\n MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD: \"all\",\n MetaCrudParams.BUCKETS_CONSIDERED_FOR_CRUD: \"all\"\n }",
"def test_ipam_rirs_update(self):\n pass",
"def NewItems(self) -> _n_1_t_7:",
"def test_initialization_of_TCRsubset_alpha_beta_case_plus_motif_finding():\n import pytest\n import pandas as pd\n from tcrregex.subset import TCRsubset\n from tcrregex.tests.my_test_subset import dist_a_subset, dist_b_subset, clone_df_subset \n from tcrregex.cdr3_motif import TCRMotif\n\n assert isinstance(dist_a_subset, pd.DataFrame)\n assert isinstance(dist_b_subset, pd.DataFrame)\n assert isinstance(clone_df_subset, pd.DataFrame)\n df = clone_df_subset.iloc[0:20, :].copy()\n db = dist_b_subset.iloc[0:20, 0:20]\n da = dist_a_subset.iloc[0:20, 0:20]\n ts=TCRsubset(clone_df = df, \n organism = \"mouse\",\n epitopes = [\"PA\"] ,\n epitope = \"PA\",\n chains = [\"A\",\"B\"],\n dist_a = da,\n dist_b = db)\n motif_df = ts.find_motif()\n assert isinstance(motif_df, pd.DataFrame)\n assert isinstance(ts.motif_df, pd.DataFrame)",
"def test_non_overlapping_similar_crud(self):\n\n # Stat validation reference variables\n verification_dict = dict()\n verification_dict[\"ops_create\"] = 0\n verification_dict[\"ops_update\"] = 0\n verification_dict[\"ops_delete\"] = 0\n verification_dict[\"rollback_item_count\"] = 0\n verification_dict[\"sync_write_aborted_count\"] = 0\n verification_dict[\"sync_write_committed_count\"] = 0\n\n for _, scope in self.bucket.scopes.items():\n for _, collection in scope.collections.items():\n verification_dict[\"ops_create\"] += collection.num_items\n if self.durability_level in self.supported_d_levels:\n verification_dict[\"sync_write_committed_count\"] \\\n += collection.num_items\n\n failed = self.durability_helper.verify_vbucket_details_stats(\n self.bucket, self.cluster_util.get_kv_nodes(self.cluster),\n vbuckets=self.cluster.vbuckets,\n expected_val=verification_dict)\n if failed:\n self.fail(\"Cbstat vbucket-details verification failed\")\n\n doc_ops = self.input.param(\"doc_ops\", \"create\")\n # Reset initial doc_loading params to NO_OPS\n doc_load_template = \\\n self.bucket_util.get_crud_template_from_package(\"initial_load\")\n doc_load_template[MetaCrudParams.DURABILITY_LEVEL] = \"\"\n doc_load_template[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 3\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 0\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n\n # Create required doc_generators for CRUD ops\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION] = 25\n if DocLoading.Bucket.DocOps.CREATE in doc_ops:\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n elif DocLoading.Bucket.DocOps.UPDATE in doc_ops:\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 50\n elif DocLoading.Bucket.DocOps.DELETE in doc_ops:\n doc_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 50\n\n async_write_crud_spec = deepcopy(doc_load_template)\n sync_write_crud_spec = deepcopy(doc_load_template)\n\n sync_write_crud_spec[MetaCrudParams.DURABILITY_LEVEL] = \\\n self.durability_level\n\n async_write_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n async_write_crud_spec,\n mutation_num=1,\n async_load=True)\n sync_write_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n sync_write_crud_spec,\n mutation_num=2,\n async_load=True)\n\n # Wait for all task to complete\n self.task.jython_task_manager.get_task_result(async_write_loading_task)\n self.task.jython_task_manager.get_task_result(sync_write_loading_task)\n\n # Validate CRUD loading results\n self.bucket_util.validate_doc_loading_results(async_write_loading_task)\n self.bucket_util.validate_doc_loading_results(sync_write_loading_task)\n\n if async_write_loading_task.result is False:\n self.log_failure(\"Doc_ops failed in async_write_task\")\n if sync_write_loading_task.result is False:\n self.log_failure(\"Doc_ops failed in sync_write_task\")\n\n # Verify doc count and other stats\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def fixture_tc_objs(request, reform_xx, puf_subsample, cps_subsample):\n puftest = request.param\n p_xx = Policy()\n p_xx.implement_reform(reform_xx, raise_errors=False)\n if puftest:\n rec_xx = Records(data=puf_subsample)\n else:\n rec_xx = Records.cps_constructor(data=cps_subsample)\n c_xx = Calculator(policy=p_xx, records=rec_xx)\n c_xx.advance_to_year(TEST_YEAR)\n c_xx.calc_all()\n return rec_xx, c_xx, puftest"
]
| [
"0.6297669",
"0.5585367",
"0.5498943",
"0.5468988",
"0.5468988",
"0.54151404",
"0.53962183",
"0.5380501",
"0.53432065",
"0.520739",
"0.5203599",
"0.5164569",
"0.5147828",
"0.5144866",
"0.5131755",
"0.51268554",
"0.51264954",
"0.51263267",
"0.51216674",
"0.5116029",
"0.50760853",
"0.5067229",
"0.50595874",
"0.5027936",
"0.4994005",
"0.49899927",
"0.4977344",
"0.4953401",
"0.49488166",
"0.49355724"
]
| 0.6881789 | 0 |
Read KML data from cache, and download from repo if necessary | def read_kml():
global kmldata
global CONFIG
    if kmldata is None:
if not os.path.exists(CONFIG["kmlfile"]):
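            # Cache miss: read the KML from the repo and pickle it locally for reuse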
fiona.drvsupport.supported_drivers['KML'] = 'rw'
kmldata = geopandas.read_file(CONFIG["kmlrepo"], driver="KML")
os.makedirs(CONFIG["cachedir"],exist_ok=True)
with open(CONFIG["kmlfile"], "wb") as fh:
pickle.dump(kmldata,fh)
else:
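            # Cache hit: load the pickled GeoDataFrame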
with open(CONFIG["kmlfile"], "rb") as fh:
kmldata = pickle.load(fh)
return kmldata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, url: str):\n\n log.info(f\"Downloading KMZ file {basename(url)}\")\n kml = self.fetch(url)\n\n log.info(\"Parsing KML data\")\n self.iter_elems = iterparse(BytesIO(kml), events=(\"start\", \"end\"), resolve_entities=False)\n\n prod_items = {\n \"issuer\": \"Issuer\",\n \"product_id\": \"ProductID\",\n \"generating_process\": \"GeneratingProcess\",\n \"issue_time\": \"IssueTime\",\n }\n\n nsmap = None\n\n # Get Basic Metadata\n prod_definition = None\n prod_definition_tag = None\n for event, element in self.iter_elems:\n if event == \"start\":\n # get namespaces from root element\n if nsmap is None:\n nsmap = element.nsmap\n prod_definition_tag = f\"{{{nsmap['dwd']}}}ProductDefinition\"\n elif event == \"end\":\n if element.tag == prod_definition_tag:\n prod_definition = element\n # stop processing after head\n # leave forecast data for iteration\n break\n\n self.metadata = {k: prod_definition.find(f\"{{{nsmap['dwd']}}}{v}\").text for k, v in prod_items.items()}\n self.metadata[\"issue_time\"] = dt.datetime.fromisoformat(self.metadata[\"issue_time\"])\n\n # Get time steps.\n timesteps = prod_definition.findall(\n \"dwd:ForecastTimeSteps\",\n nsmap,\n )[0]\n self.timesteps = [dt.datetime.fromisoformat(i.text) for i in timesteps.getchildren()]\n\n # save namespace map for later iteration\n self.nsmap = nsmap",
"def _retrieveCachedData(self):",
"def download(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.download(name, self.l2.get_path(name))\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return self.l2.load(name)\n logging.debug(f'{name} cache miss')\n return None # Cache Miss",
"def read_cache(cc):\n \n out_file = os.path.join(cc.scene_dir, 'output', cc.scene_id+'_pickle')\n if cc.atmo_src == 'narr':\n out_file += '_narr'\n elif cc.atmo_src == 'merra':\n out_file += '_merra'\n \n if not os.path.isfile(out_file):\n raise OSError('pickle_file is not in expected location %s' % out_file) \n\n with open(out_file, 'rb') as f:\n x = pickle.load(f)\n return x",
"def cache_matrio_data(filename):\n prefix = \"https://data.matr.io/3/api/v1/file\"\n key = MATRIO_DATA_KEYS[filename]\n if not os.path.isfile(filename):\n cache_download(\"{}/{}/download\".format(prefix, key), filename)",
"def getData(self, local_cache):",
"def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']",
"def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)",
"def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)",
"def check_cache(self):\n\n if os.path.exists(self.data_filename):\n self.checked = True\n return\n\n if not os.path.exists(self.work_dirname):\n os.makedirs(self.work_dirname)\n\n if (self.data_h5_path is not None) and self.data_h5_path and os.path.exists(self.data_h5_path):\n shutil.copy(self.data_h5_path, self.data_filename)\n self.checked = True\n return\n\n src_url_prefix = 'http://yann.lecun.com/exdb/mnist/'\n train_lbl_filename = 'train-labels-idx1-ubyte.gz'\n train_img_filename = 'train-images-idx3-ubyte.gz'\n val_lbl_filename = 't10k-labels-idx1-ubyte.gz'\n val_img_filename = 't10k-images-idx3-ubyte.gz'\n\n train_lbl, train_img = self._read_data(\n src_url_prefix, self.work_dirname, train_lbl_filename, train_img_filename)\n val_lbl, val_img = self._read_data(\n src_url_prefix, self.work_dirname, val_lbl_filename, val_img_filename)\n\n h5f = h5py.File(self.data_filename, 'w')\n h5f.create_dataset('train_lbl', data=train_lbl)\n h5f.create_dataset('train_img', data=train_img)\n h5f.create_dataset('val_lbl', data=val_lbl)\n h5f.create_dataset('val_img', data=val_img)\n h5f.close()\n\n self.checked = True",
"def load_cache(name, typ=\"pkl\"):\n filename = cache_name(name, typ)\n if typ == \"str\":\n with open(filename, 'r') as fin:\n return fin.read()\n elif typ == \"pkl\":\n with open(filename, 'rb') as fin:\n return pickle.load(fin)\n elif typ == \"h5\":\n import keras\n return keras.models.load_model(filename)\n else:\n raise ValueError(\"Invalid type '{}'.\".format(typ))",
"def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])",
"def load_local_cache(self):\n folder = os.path.dirname(__file__)\n path = os.path.join(folder, 'local_document_cache.dat')\n path = os.path.normpath(path)\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n with open(path, mode='rb') as the_file:\n try:\n mapa = pickle.load(the_file)\n self.komponente = mapa['komponente']\n self.analitickeMetode= mapa['metode']\n self.dilucijskeJedinice = mapa['dilucije']\n self.generatoriCistogZraka = mapa['generatori']\n self.uredjaji = mapa['uredjaji']\n self.postaje = mapa['postaje']\n except Exception as err:\n logging.error(str(err), exc_info=True)\n mes = '\\n'.join(['Ucitavanje REST cache nije uspjelo.', str(err)])\n QtGui.QApplication.restoreOverrideCursor()\n QtGui.QMessageBox.warning(QtGui.QApplication, 'Problem', mes)\n QtGui.QApplication.restoreOverrideCursor()",
"def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )",
"def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)",
"def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)",
"def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return",
"def get_content_from_cache(self):\n\n rss_feed = []\n news_to_show = 0\n\n try:\n self.print_if_verbose(\n f\"Method 'get_content_from_cache' is working: \\n\"\n f\"Trying to get content from cache...\"\n )\n os.chdir(\"cache\")\n except Exception as error:\n print(f\"{error}: cache does not exists!\")\n return\n\n try:\n os.chdir(\"image_cache\")\n self.full_path_to_image_cache = os.getcwd()\n os.chdir(\"..\")\n except:\n pass\n\n try:\n with open(\"rss_reader_cache.json\", \"r\", encoding=\"utf-8\") as cache_file:\n data_from_cache = json.load(cache_file)\n self.print_if_verbose(f\"Content from cache has been received successfully. \\n\")\n except Exception as error:\n self.print_if_verbose(f\"{error}: cache file does not exist! \\n\")\n return\n\n if self.source:\n for feed in data_from_cache:\n if self.source in feed.keys():\n for news in feed[self.source]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n else:\n for channel in data_from_cache:\n for feed_link in channel:\n for news in channel[feed_link]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n\n os.chdir(\"..\")\n\n self.news_amount = len(rss_feed)\n\n if self.news_amount == 0:\n print(f\"There is no news in cache for specified date. \\n\")\n else:\n self.print_if_verbose(f\"There is {self.news_amount} news in cache for specified date. \\n\")\n\n self.print_if_verbose(f\"Method 'get_content_from_cache' is finished. \\n\")\n\n return rss_feed",
"def __load(self, use_cache):\n\n cache_path = path_lib.get_relative_file_path('runtime', 'input_cache', f'company_embeddings_{VERSION}.pkl')\n if use_cache and os.path.isfile(cache_path):\n return path_lib.read_cache(cache_path)\n\n print(f'\\nloading data from {self.__competitor_path} ...')\n with open(self.__competitor_path, 'rb') as f:\n tmp = json.load(f)\n d_linkedin_name_2_linkedin_val = tmp['d_linkedin_name_2_linkedin_val']\n\n data = []\n\n print('loading sentence bert to generate embeddings ...')\n from sentence_transformers import SentenceTransformer\n self.__sentence_bert = SentenceTransformer('bert-large-nli-stsb-mean-tokens')\n\n # converting the raw data to features that we need\n for linkedin_name, linkedin_val in d_linkedin_name_2_linkedin_val.items():\n # get features\n feature = self.__choose_features(linkedin_val)\n data.append([feature, linkedin_name])\n\n print('writing cache ...')\n path_lib.cache(cache_path, data)\n\n print('finish loading ')\n return data",
"def __getitem__(self, pathway):\n xmlpath = self.local_kgml_dir + pathway + '.xml'\n if exists(xmlpath):\n tree = ElementTree.parse(xmlpath)\n root = tree.getroot()\n else:\n try:\n r = requests.get(self.link_to_kgml.format(pathway), timeout=5, headers=self.headers)\n r.raise_for_status()\n root = ElementTree.fromstring(r.text)\n except requests.exceptions.HTTPError:\n self.logger.warning('Unable to download pathway xml: {}'.format(pathway))\n return None\n except requests.exceptions.ConnectTimeout:\n self.logger.warning('Unable to download pathway xml: {}'.format(pathway))\n return None\n except ElementTree.ParseError:\n self.logger.warning('Unable to parse pathway xml: {}'.format(pathway))\n return None\n except Exception:\n self.logger.warning('Unknown error getting pathway xml: {}'.format(pathway))\n return None\n\n if self.save_local:\n with open(xmlpath, 'w') as fo:\n fo.write(r.text)\n\n return self.parseKGML(root)",
"def cache(self, dataset, prev_login=None):\n if dataset.archive is not self:\n raise NiAnalysisError(\n \"{} is not from {}\".format(dataset, self))\n assert dataset.uri is not None\n with self.login(prev_login=prev_login) as xnat_login:\n sess_id, scan_id = re.match(\n r'/data/experiments/(\\w+)/scans/(.*)',\n dataset.uri).groups()\n xsession = xnat_login.experiments[sess_id]\n xdataset = xsession.scans[scan_id]\n xresource = XnatSource.get_resource(xdataset, dataset)\n cache_path = self.cache_path(dataset)\n XnatSource.download_dataset(\n tempfile.mkdtemp(), xresource, xdataset, dataset,\n xsession.label, cache_path)\n return cache_path",
"def load_data(cache_file_h5py,cache_file_pickle):\n if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):\n raise RuntimeError(\"############################ERROR##############################\\n. \"\n \"please download cache file, it include training data and vocabulary & labels. \"\n \"link can be found in README.md\\n download zip file, unzip it, then put cache files as FLAGS.\"\n \"cache_file_h5py and FLAGS.cache_file_pickle suggested location.\")\n print(\"INFO. cache file exists. going to load cache file\")\n f_data = h5py.File(cache_file_h5py, 'r')\n print(\"f_data.keys:\",list(f_data.keys()))\n train_X=f_data['train_X'] # np.array(\n print(\"train_X.shape:\",train_X.shape)\n train_Y=f_data['train_Y'] # np.array(\n print(\"train_Y.shape:\",train_Y.shape,\";\")\n vaild_X=f_data['vaild_X'] # np.array(\n valid_Y=f_data['valid_Y'] # np.array(\n test_X=f_data['test_X'] # np.array(\n test_Y=f_data['test_Y'] # np.array(\n #print(train_X)\n #f_data.close()\n\n word2index, label2index=None,None\n with open(cache_file_pickle, 'rb') as data_f_pickle:\n word2index, label2index=pickle.load(data_f_pickle)\n print(\"INFO. cache file load successful...\")\n return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y",
"def load_data(cache_file_h5py,cache_file_pickle):\n if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):\n raise RuntimeError(\"############################ERROR##############################\\n. \"\n \"please download cache file, it include training data and vocabulary & labels. \"\n \"link can be found in README.md\\n download zip file, unzip it, then put cache files as FLAGS.\"\n \"cache_file_h5py and FLAGS.cache_file_pickle suggested location.\")\n print(\"INFO. cache file exists. going to load cache file\")\n f_data = h5py.File(cache_file_h5py, 'r')\n print(\"f_data.keys:\",list(f_data.keys()))\n train_X=f_data['train_X'] # np.array(\n print(\"train_X.shape:\",train_X.shape)\n train_Y=f_data['train_Y'] # np.array(\n print(\"train_Y.shape:\",train_Y.shape,\";\")\n vaild_X=f_data['vaild_X'] # np.array(\n valid_Y=f_data['valid_Y'] # np.array(\n test_X=f_data['test_X'] # np.array(\n test_Y=f_data['test_Y'] # np.array(\n #print(train_X)\n #f_data.close()\n\n word2index, label2index=None,None\n with open(cache_file_pickle, 'rb') as data_f_pickle:\n word2index, label2index=pickle.load(data_f_pickle)\n print(\"INFO. cache file load successful...\")\n return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y",
"def load_data(cache_file_h5py,cache_file_pickle):\n if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):\n raise RuntimeError(\"############################ERROR##############################\\n. \"\n \"please download cache file, it include training data and vocabulary & labels. \"\n \"link can be found in README.md\\n download zip file, unzip it, then put cache files as FLAGS.\"\n \"cache_file_h5py and FLAGS.cache_file_pickle suggested location.\")\n print(\"INFO. cache file exists. going to load cache file\")\n f_data = h5py.File(cache_file_h5py, 'r')\n print(\"f_data.keys:\",list(f_data.keys()))\n train_X=f_data['train_X'] # np.array(\n print(\"train_X.shape:\",train_X.shape)\n train_Y=f_data['train_Y'] # np.array(\n print(\"train_Y.shape:\",train_Y.shape,\";\")\n vaild_X=f_data['vaild_X'] # np.array(\n valid_Y=f_data['valid_Y'] # np.array(\n test_X=f_data['test_X'] # np.array(\n test_Y=f_data['test_Y'] # np.array(\n #print(train_X)\n #f_data.close()\n\n word2index, label2index=None,None\n with open(cache_file_pickle, 'rb') as data_f_pickle:\n word2index, label2index=pickle.load(data_f_pickle)\n print(\"INFO. cache file load successful...\")\n return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y",
"def load_data(cache_file_h5py,cache_file_pickle):\n if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):\n raise RuntimeError(\"############################ERROR##############################\\n. \"\n \"please download cache file, it include training data and vocabulary & labels. \"\n \"link can be found in README.md\\n download zip file, unzip it, then put cache files as FLAGS.\"\n \"cache_file_h5py and FLAGS.cache_file_pickle suggested location.\")\n print(\"INFO. cache file exists. going to load cache file\")\n f_data = h5py.File(cache_file_h5py, 'r')\n print(\"f_data.keys:\",list(f_data.keys()))\n train_X=f_data['train_X'] # np.array(\n print(\"train_X.shape:\",train_X.shape)\n train_Y=f_data['train_Y'] # np.array(\n print(\"train_Y.shape:\",train_Y.shape,\";\")\n vaild_X=f_data['vaild_X'] # np.array(\n valid_Y=f_data['valid_Y'] # np.array(\n test_X=f_data['test_X'] # np.array(\n test_Y=f_data['test_Y'] # np.array(\n #print(train_X)\n #f_data.close()\n\n word2index, label2index=None,None\n with open(cache_file_pickle, 'rb') as data_f_pickle:\n word2index, label2index=pickle.load(data_f_pickle)\n print(\"INFO. cache file load successful...\")\n return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y",
"def use_cached_files(self, cache_key):\r\n pass",
"def loadGameFromCache(self, theKey):\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n theLine = None\n try:\n theLine = br.readLine()\n br.close()\n ir.close()\n gIn.close()\n fIn.close()\n except Exception as e:\n if theLine == None:\n return None\n return Game.loadFromJSON(theLine)",
"def data(self):\r\n from pylons import g\r\n wiki_data = g.permacache.get(self.cache_key)\r\n\r\n if wiki_data is None:\r\n # Parse the XML file\r\n wiki_xml = etree.parse(self.pathname)\r\n wiki_data = self._process_data(wiki_xml)\r\n g.permacache.set(self.cache_key, wiki_data)\r\n\r\n return wiki_data",
"def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache",
"def _get(self):\n self.lib.get()\n blob = self.get_cached_blob()\n if not blob:\n blob = self.knex.get(self.nested_url(), url_options=self.inherited_url_options)\n self.load_blob(blob)\n self.cache_blob(blob)\n else:\n self.load_blob(blob)"
]
| [
"0.6442564",
"0.61632365",
"0.6131439",
"0.6007126",
"0.59295243",
"0.59220517",
"0.58809197",
"0.5823584",
"0.58158576",
"0.57879627",
"0.57863224",
"0.5725393",
"0.5719492",
"0.5704884",
"0.5697475",
"0.5693312",
"0.5687403",
"0.56795806",
"0.56612206",
"0.56491363",
"0.5610846",
"0.56051785",
"0.56051785",
"0.56051785",
"0.56051785",
"0.5594325",
"0.55923337",
"0.5589232",
"0.55781966",
"0.55744606"
]
| 0.7199014 | 0 |
Read CSV data from cache, and download from repo if necessary | def read_csv():
global csvdata
global CONFIG
if type(csvdata) == type(None):
if not os.path.exists(CONFIG["csvfile"]):
csvdata = pandas.read_csv(CONFIG["csvrepo"],
na_values=["-999999","NOT AVAILABLE"])
os.makedirs(CONFIG["cachedir"],exist_ok=True)
csvdata.to_csv(CONFIG["csvfile"])
else:
csvdata = pandas.read_csv(CONFIG["csvfile"])
return csvdata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_csv_cached(filename='../apps/naive_c_stats.csv', cache={}):\n if filename in cache:\n return cache[filename]\n if not os.path.exists(filename):\n ans = None\n else:\n ans = numpy.recfromcsv(filename)\n cache[filename] = ans\n return ans",
"def _csv_get(page):\n cache_key = reverse('timetable.views.display')\n\n ret = cache.get(cache_key)\n if ret is not None:\n print 'hola'\n return ret\n else:\n print 'ciao'\n ret = _csv_download(page)\n cache.set(cache_key, ret, timeout=15) # cache lasts 15 seconds\n return ret",
"def init_cache_from_csv(self, path: str) -> None:\n log.debug(f\"Initalising {self.name} from csv at {path}\")\n df = io.csv_to_df(path=path)\n df = df.set_index(self.ids).sort_index(axis=0).sort_index(axis=1)\n io.df_to_parquet(df=df, path=self.path)\n log.debug(f\"{self.name} now cached in local parquet.\")",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def read_csv_input_file(self,\n file_name: str,\n usecols: list = None,\n names: list = None,\n remove_spurious_urls=False,\n unique_key=None\n ):\n\n # split the extension two time so we can also deal with a double extension bla.csv.zip\n file_base, file_ext = os.path.splitext(file_name)\n file_base2, file_ext2 = os.path.splitext(file_base)\n\n # build the cache file including the cache_directory\n cache_file = Path(CACHE_DIRECTORY) / (file_base2 + \".pkl\")\n\n if os.path.exists(cache_file):\n # add the type so we can recognise it is a data frame\n self.logger.info(\"Reading from cache {}\".format(cache_file))\n df: pd.DataFrame = pd.read_pickle(cache_file)\n df.reset_index(inplace=True)\n elif \".csv\" in (file_ext, file_ext2):\n self.logger.info(\"Reading from file {}\".format(file_name))\n df = pd.read_csv(file_name,\n header=None,\n usecols=usecols,\n names=names\n )\n\n if remove_spurious_urls:\n self.logger.info(\"Removing spurious urls\")\n df = self.remove_spurious_urls(df)\n\n df = self.clip_kvk_range(df, unique_key=unique_key, kvk_range=self.kvk_range_read)\n\n self.logger.info(\"Writing data to cache {}\".format(cache_file))\n df.to_pickle(cache_file)\n else:\n raise AssertionError(\"Can only read h5 or csv files\")\n\n try:\n df.drop(\"index\", axis=0, inplace=True)\n except KeyError:\n self.logger.debug(\"No index to drop\")\n else:\n self.logger.debug(\"Dropped index\")\n\n return df",
"def download_global_csv(output_dir: str):\n for filename, url_path in CSVS_TO_READ:\n url = urljoin(GITHUB_BASE_URL, url_path)\n path = os.path.join(output_dir, filename)\n df = pd.read_csv(url)\n df.to_csv(path)",
"def fetch(url: str, cache: str) -> pd.DataFrame:\n r = requests.get(url)\n r.raise_for_status()\n datestamp = date.today().strftime('%Y%m%d')\n name = url.split('/')[-1].replace('.csv','')\n os.makedirs(cache, exist_ok=True)\n filename = os.path.join(cache, f\"{datestamp}_{name}.csv\")\n with open(filename, \"w\") as f:\n f.write(r.text)\n return pd.read_csv(filename)",
"def _retrieveCachedData(self):",
"def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)",
"def _get_csv_data():\n temp_dir = './catalog/vendor_caches'\n if not os.path.exists(temp_dir):\n os.mkdir(temp_dir)\n \n csv_output_dict = {}\n \n print 'Downloading csv file...'\n br = utils.create_browser(SLEEP_MIN, SLEEP_MAX)\n \n if TESTRUN: print 'Load Login Page'\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n# br.response().read()\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n \n # Simulate js for setting cookies\n utmn = str(int(random.random()*4294967295))\n utmu = str(int(time.time()/1000))\n utm1 = \"__utm1=\"+utmn+\".\"+utmu+\"; path=/; expires=Sun, 18 Jan 2038 00:00:00 GMT\"\n utm2 = \"__utm2=\"+utmu+\"; path=/; expires=Sun, 18 Jan 2038 00:00:00 GMT\"\n utm3 = \"__utm3=\"+utmu+\"; path=/;\"\n br.set_cookie(utm1)\n br.set_cookie(utm2)\n br.set_cookie(utm3)\n \n if TESTRUN: print 'Downloading and extracting CSV'\n try:\n tar_url = \"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, os.path.join(temp_dir, \"bro-AllStyles_R06.tar.gz\"))\n tar = tarfile.open(os.path.join(temp_dir, \"bro-AllStyles_R06.tar.gz\"))\n #~ member = tar.getmember('/usr/dbx/ai/AllStyles4/bro/items_R06.csv') # get file info \n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall(os.path.join(temp_dir, 'bro-AllStyles_R06'))\n tar.close()\n except:\n print \"Issue in downloading CSV\"\n return None\n \n #reader = csv.reader(open('data/bro-AllStyles_R06/items_R06.csv', 'rb'))\n \n f_object = open(os.path.join(temp_dir, 'bro-AllStyles_R06/items_R06.csv'), 'rb')\n reader = csv.reader(f_object)\n \n for row in reader:\n item_id = row[7].lower()\n if csv_output_dict.has_key(item_id):\n if TESTRUN:print \"item id already in dictionary so excluding it.\"\n pass\n else:\n mill = row[23]\n item_url = 'https://www.broderbros.com/cgi-bin/online/webshr/prod-detail.w?sr='+str(item_id)\n browser = utils.create_browser(SLEEP_MIN, SLEEP_MAX)\n browser.set_handle_redirect(False)\n \n try:\n #~ browser.open_novisit(item_url)\n temp_dict = {}\n temp_dict['id'] = item_id.lower()\n temp_dict['brand'] = mill.lower()\n temp_dict['url'] = item_url\n csv_output_dict[item_id] = temp_dict\n if TESTRUN:\n print temp_dict\n print '+'*78\n except:\n pass\n f_object.close()\n shutil.rmtree(os.path.join(temp_dir, \"bro-AllStyles_R06\"))\n \n os.remove(os.path.join(temp_dir, \"bro-AllStyles_R06.tar.gz\"))\n return csv_output_dict",
"def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))",
"def getData(self, local_cache):",
"def get_data(self, csv_file):\n pass",
"def create(cls):\n ssl._create_default_https_context = ssl._create_unverified_context\n c = lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv')\n return c",
"def prepare_cache_data() -> None:\n logger.debug(\"Downloading chromium\")\n if os.path.exists(LOCAL_CHROMIUM_PATH):\n return None\n s3 = S3()\n logger.debug(\"Check that remote folder exists:\")\n if not s3.is_exists(bucket=CACHE_BUCKET, key=\"local-chromium\"):\n s3.download_directory(bucket=CACHE_BUCKET,\n key=\"local-chromium\",\n dst=os.path.dirname(LOCAL_CHROMIUM_PATH))\n logger.debug(\"Downloading Done!!!\")",
"def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))",
"def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df",
"def fetch_csv_from_url(url):\n\t\n\t#cache avoidance.\n\twith requests_cache.disabled():\n\t\tr = requests.get(url)\n\t\tif r.status_code == 200:\n\t\t\treturn r.iter_lines()",
"def downloading_csv(self, url_address):\n cache.clear()\n url = URL(url_address)\n f = open(self.cur_quotes_csvfile, 'wb') # save as test.gif\n f.write(url.download())\n f.close()",
"def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()",
"def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)",
"def download_mp3_by_csv(s, username, passwd, csv_path, download_dir=None):\n\n s = login(s, username, passwd)\n refs = pd.read_csv(csv_path, sep=';').Name\n length = len(refs)\n for i, ref in enumerate(refs):\n sys.stdout.write('\\r')\n sys.stdout.write('downloading: %s/%s' % (i+1, length))\n sys.stdout.flush()\n s = search_by_ref(s, ref)\n mp3_path = None\n if download_dir != None:\n file_name = '%s.mp3' % ref\n mp3_path = os.path.join(download_dir, file_name)\n result = download_mp3(s, mp3_path, ref)\n if result == 1:\n return 1\n sys.stdout.write('\\n')\n sys.stdout.flush()\n s.driver.close()",
"def update_csv():\n return os.listdir('./data')",
"def ShyRetrieve(symbol, minDate=None, downloadMissing=None):\n import os\n for file in os.listdir(\"Cache\"):\n if file == f\"{symbol}.csv\":\n print(f\"Parsing {symbol} from local drive.\")\n stock = Stock.ParseCSV(\"Cache/\" + file)\n if len(stock.history) != 0 and (minDate == None or stock.history[-1].date >= minDate):\n return stock\n\n def okayToDownload():\n if minDate != None:\n return True\n if downloadMissing != None:\n return downloadMissing\n print(f\"{symbol} not found in local drive. Okay to download from yfinance? (y/n)\")\n response = input()\n if response.lower() == \"y\":\n return True\n elif response.lower() == \"n\":\n return False\n else:\n print(\"Please respond \\\"y\\\" or \\\"n\\\".\")\n return okayToDownload()\n\n if okayToDownload():\n #stock = Stock.FromYfinance(symbol=symbol)\n stock = Stock(symbol=symbol)\n stock.Update()\n stock.SaveToCSV()\n print(f\"{stock.name} downloaded from yfinance API.\")\n return stock\n else:\n return Stock(symbol=symbol)",
"def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)",
"def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)",
"def download(csvpath, asset_manager_id, data_id_type, data_id_list):\n interface = interface_direct_csvpath(csvpath)\n logging.config.dictConfig(DEFAULT_LOGGING)\n logger = logging.getLogger(__name__)\n objs = []\n for data_id in data_id_list:\n Dict = dict()\n Dict[data_id_type] = data_id\n objs.append(interface.retrieve(asset_manager_id=asset_manager_id, **Dict))\n return objs",
"def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j",
"def read_sentiment140(sentiment140Path = \"/data/sentiment140/sentiment140.csv\"):\n\n senti140Cache = join(cacheDir, \"sentiment140.json\")\n\n\n # create cached file if necessary\n if not exists(senti140Cache):\n ensureCache()\n\n # request path to file if necessary\n if not sentiment140Path:\n print(\"Please provide the local path to the sentiment140 dataset: \")\n sentiment140Path = sys.stdin.readline().strip()\n\n # download the file if it doesn't exist\n if not exists(sentiment140Path):\n\n # download entire source zipfile from internet\n print(\"Downloading sentiment140 dataset from Stanford...\")\n file_path = get_file(url_sentiment140)\n\n # save specified CSV from zipfile\n with ZipFile(file_path, 'r') as zp:\n zp.extract(csv_sentiment140, dir_tmp_sentiment140)\n shutil.move(os.path.join(dir_tmp_sentiment140, csv_sentiment140), sentiment140Path)\n\n # write to cache\n with open(senti140Cache,\"w\") as cacheFile:\n with open(sentiment140Path) as sentiPath:\n\n # enumerate over CSV entries\n reader = latin_csv_reader(sentiPath, delimiter=',')\n for i, line in enumerate(reader):\n\n # format text\n text = preprocess.tweet(line[index_sentiment140_label])\n\n # generate binary label\n if line[index_sentiment140_text] == label_sentiment140_positive:\n label = label_positive\n else:\n label = label_negative\n\n # write (text,label) pairs\n cacheFile.write( json.dumps([text, label]) )\n cacheFile.write(\"\\n\")\n\n return cacheMaker(senti140Cache)",
"def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data"
]
| [
"0.65985745",
"0.65972996",
"0.6584848",
"0.63103306",
"0.62697816",
"0.6211688",
"0.61880475",
"0.6171694",
"0.6123702",
"0.6039283",
"0.5928468",
"0.59249216",
"0.5912777",
"0.59076524",
"0.5897626",
"0.5853826",
"0.5804479",
"0.578792",
"0.5781011",
"0.57711154",
"0.57405144",
"0.5719462",
"0.5683395",
"0.5681288",
"0.5626546",
"0.56235516",
"0.5605092",
"0.5591836",
"0.5559103",
"0.5552184"
]
| 0.6952292 | 0 |
Returns the utility id(s) at the given position(s) ARGUMENT pos (tuple) (latitude,longitude) of position pos (list) list of (latitude,longitude) positions RETURN int utility id if pos is a tuple list list of utility ids if pos is a list of tuples | def get_utility(pos):
kml = read_kml()
if type(pos[0]) in (list,tuple): # vector of positions
return list(map(lambda x: list(kml[kml.contains(Point(x[1],x[0]))==True].index),pos))
else: # singleton
return list(kml[kml.contains(Point(pos[1],pos[0]))].index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def id_from_position(self, position: PositionT) -> Union[int, Array]:\n int_pos = self._to_integer_position(position)\n ids = self._get_id_from_dict(self._int_position_to_site, int_pos)\n return ids",
"def get_user_and_item_ids(self, data):\n\n vec_extract = np.vectorize(self.extract_row_and_col_number)\n userID, itemID = vec_extract(data[:, 0])\n userID -= 1\n itemID -= 1\n return userID, itemID",
"def obtain_seq_pos_info(result,seq_pos,seq_pos_n,chain_name,multiple_chains):\n chain_nm_seq_pos=\"\"\n if multiple_chains:\n chain_nm_seq_pos=chain_name\n for pos in result:\n if pos[0] != \"-\": #Consider only num in the pdb\n seq_pos.append([pos[0][0],pos[0][1],\"\",chain_nm_seq_pos,seq_pos_n]);\n seq_pos_n+=1\n return (seq_pos,seq_pos_n)",
"def index_from_position_tuple(self, position):\n x = self.base_values.index(position[0])\n y = self.base_values.index(position[1])\n return y * self.size + x",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos",
"def getPionID(self, posisi):\n for idx in range(len(self.arrayPion)):\n if (self.arrayPion[idx].currentPosition == posisi):\n return idx\n return -1",
"def posIdc(vec):\n for idx in vec:\n if idx == 0:\n continue\n if idx > 0:\n return tuple(vec)\n else:\n return tuple(-np.array(vec))",
"def get_position(pos):\n if type(pos) is str:\n return list(map(lambda x: float(x),pos.split(\",\")))\n return pos",
"def get_waypoint_pos(pos: list, waypoints: numpy.ndarray):\n dist = np.sum((pos - waypoints)**2, axis=1)\n wp_id = np.argmin(dist)\n return waypoints[wp_id], wp_id",
"def position_tuples(self, protein=False):\n if protein:\n if not self.is_coding():\n raise AttributeError(\n \"Cannot return wild type protein \"\n \"position tuples for non-coding wild \"\n \"type [{}]\".format(self.parent_name)\n )\n else:\n seq = self.protein_seq\n offset = self.protein_offset\n else:\n seq = self.dna_seq\n offset = self.dna_offset\n\n return [(i + offset + 1, seq[i]) for i in range(len(seq))]",
"def _get_coordinates(self, tile, position=None):\n if not position:\n position = self.position\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if position[i][j] == tile:\n return i, j\n\n return RuntimeError('Invalid tile value')",
"def parse_pos(pos, regexp=POS_REGEXP):\n m = regexp.match(pos)\n return tuple(map(int, m.groups()))",
"def _get_insertion_info(insertion_pos, allowed_strand_vals=SEQ_STRANDS):\n try:\n strand, ins_start, ins_end = insertion_pos.strand, insertion_pos.min_position, insertion_pos.max_position\n except AttributeError:\n strand, ins_start, ins_end = insertion_pos\n if allowed_strand_vals is not None:\n assert strand in allowed_strand_vals, \"Strand should be %s, and is %s!\"%(' or '.join(allowed_strand_vals), strand)\n return strand, ins_start, ins_end",
"def _get_id(self, item, prefix, item_list):\r\n try:\r\n index = item_list.index(item)\r\n except ValueError:\r\n index = len(item_list)\r\n item_list.append(item)\r\n\r\n return self._id_for_index(prefix, index)",
"def get_idx_from_latlon(self, latitudes, longitudes, unique=False):\n try:\n lat_idx = [np.argmin(np.abs(self.lat - lat)) for lat in latitudes]\n lon_idx = [np.argmin(np.abs(self.lon - lon)) for lon in longitudes]\n if unique:\n ijdx = np.vstack({(i, j) for i, j in zip(lat_idx, lon_idx)})\n lat_idx = ijdx[:, 0].tolist()\n lon_idx = ijdx[:, 1].tolist()\n except TypeError:\n lat_idx = np.argmin(np.abs(self.lat - latitudes))\n lon_idx = np.argmin(np.abs(self.lon - longitudes))\n\n return lat_idx, lon_idx",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError",
"def _get_points(self, pos):\n\t\tpoints = [ (self.last_pos[0], self.last_pos[1]) ]\n\t\tlen_x = pos[0] - self.last_pos[0]\n\t\tlen_y = pos[1] - self.last_pos[1]\n\t\tlength = math.sqrt(len_x ** 2 + len_y ** 2)\n\t\tstep_x = len_x / length\n\t\tstep_y = len_y / length\n\t\tfor i in xrange(int(length)):\n\t\t\tpoints.append((points[-1][0] + step_x, points[-1][1] + step_y))\n\t\tpoints = map(lambda x:(int(0.5+x[0]), int(0.5+x[1])), points)\n\t\t# return light-weight, uniq integer point list\n\t\treturn list(set(points))",
"def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)",
"def getProximity(tuples):\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1]",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def getIntPos(self):\n return (int(self.pos[0]),int(self.pos[1]))",
"def convert2int(self,seq_pep):\n\t\treturn [self.aminoacids.index(pep) for pep in seq_pep]",
"def info(self, list: list[int], /) -> list[int]:",
"def local_coordinates(self, position: np.ndarray) -> Tuple[float, float]:\n raise NotImplementedError()",
"def check_valid_position_tuple(pos):\n try: chrom, start_pos, end_pos, strand = pos\n except (TypeError, ValueError): raise MutantError(\"Didn't get a correct position tuple! %s\"%pos)\n if strand not in SEQ_STRANDS: raise MutantError(\"Invalid strand %s!\"%strand)\n if start_pos < 1: raise MutantError(\"Sequence positions must be positive!\")\n if start_pos > end_pos: raise MutantError(\"Sequence start can't be after end!\")",
"def make_TSUGITE_list(TSUGITE_name, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from m_info.\n \"\"\"\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m2 = m2_info[2]\n\n m2_points = m2_info[3]\n\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m3 = m3_info[2]\n\n m3_points = m3_info[3]\n\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n \"\"\"\n 2 Get base point to make TSUGITE.\n \"\"\"\n # base_point = (dx, dy)\n dx_U = x_m2 / 2\n dy_U = m2_p0[1]\n\n dx_L = x_m3 / 2\n dy_L = m3_p0[1]\n\n \"\"\"\n 3 Call appropriate function.\n \"\"\"\n if TSUGITE_name == 'ARI':\n dx = dx_U\n dy = dy_U\n m_info = m2_info\n\n m2_left_list, m2_right_list, m2_SEN_info = make_ARI_list(dx, dy, m_info, offset)\n\n dx = dx_L\n dy = dy_L\n m_info = m3_info\n\n m3_left_list, m3_right_list, m3_SEN_info = make_ARI_list(dx, dy, m_info, offset)\n\n elif TSUGITE_name == 'KAMA':\n pass\n elif TSUGITE_name == 'RYAKUKAMA':\n pass\n elif TSUGITE_name == 'MECHIGAI':\n pass\n elif TSUGITE_name == 'AIKAKI':\n pass\n elif TSUGITE_name == 'KOSHIKAKE':\n pass\n elif TSUGITE_name == 'HAKO':\n pass\n else:\n sys.exit()\n\n TSUGITE_list = [m2_left_list, m2_right_list, m3_left_list, m3_right_list]\n\n return TSUGITE_list, m2_SEN_info, m3_SEN_info",
"def _position_to_id(self, x, y):\n return x + y * self.n",
"def get_position(self, position):",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column"
]
| [
"0.6022311",
"0.5717848",
"0.5675736",
"0.54066074",
"0.53870887",
"0.53862596",
"0.5323103",
"0.52927417",
"0.5261723",
"0.52416205",
"0.5208296",
"0.5198828",
"0.5192943",
"0.5138359",
"0.5123215",
"0.5121656",
"0.51213694",
"0.51176745",
"0.50980175",
"0.5092307",
"0.5070137",
"0.5059437",
"0.5026772",
"0.50130194",
"0.49868482",
"0.4966918",
"0.4966547",
"0.4951469",
"0.49336293",
"0.49244386"
]
| 0.6860404 | 0 |
Compute the (latitude,longitude) tuple of the position given ARGUMENT pos (str) The position given as a comma-delimited string pos (tuple) The position given as a (lat,lon) tuple RETURN tuple The position given as a (lat,lon) tuple | def get_position(pos):
if type(pos) is str:
return list(map(lambda x: float(x),pos.split(",")))
return pos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_coordinates(self, place):\n if re.match(r\"-?[\\d.]+,-?[\\d.]+\", place):\n return tuple(place.split(\",\"))\n return tuple(\n str(coord) for coord in self._geocoder.geosearch(place).coordinates\n )",
"def parse_coords(geo: str) -> Tuple[float, float]:\n lat, long = [float(x.strip()) for x in geo.split(\",\")]\n if lat > 90 or lat < -90:\n raise ValueError(\"latitude does not fall in the range (-90, 90)\")\n if long > 180 or long < -180:\n raise ValueError(\"longitude does not fall in the range (-180, 180)\")\n return (lat, long)",
"def parse_pos(pos, regexp=POS_REGEXP):\n m = regexp.match(pos)\n return tuple(map(int, m.groups()))",
"def _coord_to_tuple(self, coord):\n if isinstance(coord, str):\n return tuple(float(k) for k in coord[1:-1].split(', '))\n else:\n assert len(coord) == 2\n return coord",
"def local_coordinates(self, position: np.ndarray) -> Tuple[float, float]:\n raise NotImplementedError()",
"def get_tuple(self, string):\n a = re.search('\\((\\d+\\.\\d+), (\\d+\\.\\d+)\\)', string)\n if not a:\n return None\n else:\n return (float(a.group(1)), float(a.group(2)))",
"def _point_as_tuple(input_string: str) -> _Tuple[float]:\n out = tuple(float(coordinate) for coordinate in input_string.split(','))\n if len(out) == DIMENSIONS:\n return out\n raise TypeError",
"def get_location_str(self) -> tuple:\r\n if self.data is None:\r\n return (None, None)\r\n\r\n lat = self.data['GPSInfo']['GPSLatitude']\r\n lon = self.data['GPSInfo']['GPSLongitude']\r\n \r\n # Convert from Degrees, minutes, seconds to standard form\r\n latitude = (lat[0][0] / lat[0][1]) \\\r\n + (lat[1][0] /lat[1][1] / 60) \\\r\n + (lat[2][0] / lat[2][1] / 3600)\r\n \r\n longitude = (lon[0][0] / lon[0][1]) \\\r\n + (lon[1][0] / lon[1][1] / 60) \\\r\n + (lon[2][0] / lon[2][1] / 3600)\r\n\r\n # Make the results presentable\r\n latitude = str(round(latitude, 6)) \\\r\n + chr(176) + ' ' \\\r\n + self.data['GPSInfo']['GPSLatitudeRef']\r\n \r\n longitude = str(round(longitude, 6)) \\\r\n + chr(176) + ' ' \\\r\n + self.data['GPSInfo']['GPSLongitudeRef']\r\n \r\n return (latitude, longitude)",
"def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos",
"def parse_position(chrom_pos: str):\n chrom, pos = chrom_pos.split('_')\n return chrom, int(pos)",
"def coords_to_gps(self,coords):\n return ((self.max_lat - (self.lat_step * (0.5+coords[0]))),(self.min_lon + (self.lon_step * (0.5+coords[1]))))",
"def get_location(self) -> tuple:\r\n if self.data is None:\r\n return (None, None)\r\n \r\n lat = self.data['GPSInfo']['GPSLatitude']\r\n lon = self.data['GPSInfo']['GPSLongitude']\r\n \r\n # Convert from Degrees, minutes, seconds to standard form\r\n latitude = (lat[0][0] / lat[0][1]) \\\r\n + (lat[1][0] / lat[1][1] / 60) \\\r\n + (lat[2][0] / lat[2][1] / 3600)\r\n \r\n longitude = (lon[0][0] / lon[0][1]) \\\r\n + (lon[1][0] / lon[1][1] / 60) \\\r\n + (lon[2][0] / lon[2][1] / 3600)\r\n\r\n # Adjust for direction references\r\n if self.data['GPSInfo']['GPSLatitudeRef'] == 'S':\r\n latitude *= -1\r\n\r\n if self.data['GPSInfo']['GPSLongitudeRef'] == 'W':\r\n longitude *= -1\r\n\r\n return (round(latitude, 6), round(longitude, 6))",
"def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])",
"def get_lat(x):\n lat, lon = x.split(',')\n return float(lat)",
"def gps_to_coords(self,lat,lon):\n\n if (lat <= self.min_lat or lat >= self.max_lat or lon <= self.min_lon or lon >= self.max_lon):\n return (-1,-1)\n\n lat_spot = int((self.max_lat-lat)/self.lat_step)\n lon_spot = int((lon-self.min_lon)/self.lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)",
"def retupleize_geo_strings(value):\n if not value:\n return value\n elif \"(\" not in value:\n return value\n try:\n # Is this a dirty, dirty hack, or inspiration?\n # Location is retrieved as a string from the database\n # The alternative is to retrieve and process the\n # entire activity dataset...\n return eval(value)\n except NameError:\n # Not everything with a parentheses is a tuple.\n return value",
"def _make_pos(pos):\n return pos.chromosome, pos.strand, pos.min_position, pos.min_position+20",
"def get_coordinates(text):\n m = re.search(COORD_PATTERN, text)\n if m:\n neglat = m.groups(0)[0]\n latitude = neglat + m.groups(0)[1]\n neglong = m.groups(0)[2]\n longitude = neglong + m.groups(0)[3]\n return {\n \"lat\": latitude,\n \"lon\": longitude\n }\n return None",
"def success_geo(lat,lng):\n return (lng,lat)",
"def _parse_uncompressed_position(data: str) -> Tuple[float, float, int, str, str]:\n # Decode the latitude and ambiguity\n try:\n lat, ambiguity = APRSUtils.decode_uncompressed_latitude(data[0:8])\n\n except ValueError as e:\n raise ParseError(\"Invalid latitude: {}\".format(e))\n\n # Decode the longitude\n try:\n lng = APRSUtils.decode_uncompressed_longitude(data[9:18])\n\n except ValueError as e:\n raise ParseError(\"Invalid longitude: {}\".format(e))\n\n logger.debug(\"Latitude: {} ({}) Longitude: {}\".format(\n lat, ambiguity, lng\n ))\n\n # Parse the symbol table\n symbol_table = data[8]\n logger.debug(\"Symbol table: {}\".format(symbol_table))\n\n try:\n # Parse the symbol ID\n symbol_id = data[18]\n logger.debug(\"Symbol: {}\".format(symbol_id))\n except IndexError:\n raise ParseError(\"Missing symbol identifier\")\n\n return (lat, lng, ambiguity, symbol_table, symbol_id)",
"def parsenwspt(text):\n lat = int(text[0:4]) / 100\n lon = int(text[4:])\n if lon < 1000:\n lon += 10000\n return (lon / -100, lat)",
"def tuple(self) -> Tuple[float, float]:\n return (self.latitude, self.longitude)",
"def geolocate(place): # string\n geolocator = geopy.geocoders.Nominatim()\n location = geolocator.geocode(place)\n # i dati si danno in (latitudine, longitudine), ma vanno intesi come (y, x)\n # ovvero vanno visualizzati come x=longitudine, y=latitudine\n return (location.latitude, location.longitude) # coordinate",
"def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])",
"def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng",
"def get_coordinates(geotags) -> Tuple[float, float]:\n lat = get_decimal_from_dms(\n geotags['GPSLatitude'],\n geotags['GPSLatitudeRef'],\n )\n lon = get_decimal_from_dms(\n geotags['GPSLongitude'],\n geotags['GPSLongitudeRef'],\n )\n\n return lat, lon",
"def parse_location(location_str):\n def floatify(latlon):\n \"\"\" Turns a latlon string into a float \"\"\"\n sign = -2. * (latlon[-1].lower() in ['s', 'w']) + 1\n return float(latlon[:-1]) * sign\n points = location_str.strip().split(',')\n if not len(points) == 2:\n raise BadQuery(\"Expected four comma seperated values \"\n \"defining a single point.\")\n\n is_lat = lambda x: x[-1].lower() in ['n', 's']\n lat = filter(is_lat, points)\n if not len(lat) == 1:\n raise BadQuery(\"Expected two latitudes (determined by \" +\n \"values ending in 'N' or 'S'\")\n is_lon = lambda x: x[-1].lower() in ['e', 'w']\n lon = filter(is_lon, points)\n if not len(lon) == 1:\n raise BadQuery(\"Expected two longitudes (determined by \" +\n \"values ending in 'E' or 'W'\")\n lat = floatify(lat[0])\n lon = floatify(lon[0])\n\n # make sure latitude is in range.\n if (lat > 90.) or (lat < -90):\n raise BadQuery(\"Latitude must be within -90 and 90, got %s\" %\n str(lat))\n # we let the user use either longitudes of 0 to 360\n # or -180 to 180, then convert to nautical (-180 to 180).\n if lon > 360. or lon < -180.:\n raise BadQuery(\"Longitudes must be within -180 and 360, got %s\" %\n str(lon))\n # make sure lons end up in -180 to 180.\n lon = np.mod(lon + 180., 360.) - 180.\n\n location = {'latitude': lat,\n 'longitude': lon}\n return location",
"def tile_coordinates(text):\n UL = (text[1]), (text[2]) # Upper Left\n UR = (text[3]), (text[2]) # Upper Right\n LR = (text[3]), (text[4]) # Lower Right\n LL = (text[1]), (text[4]) # Lower Left\n coordinates = (UL, UR, LR, LL)\n return text[0], [tuple(float(x) for x in xs) for xs in coordinates]",
"def check_valid_position_tuple(pos):\n try: chrom, start_pos, end_pos, strand = pos\n except (TypeError, ValueError): raise MutantError(\"Didn't get a correct position tuple! %s\"%pos)\n if strand not in SEQ_STRANDS: raise MutantError(\"Invalid strand %s!\"%strand)\n if start_pos < 1: raise MutantError(\"Sequence positions must be positive!\")\n if start_pos > end_pos: raise MutantError(\"Sequence start can't be after end!\")",
"def string_to_json_position(x):\n\n s = x.split(',')\n return {'lat': float(s[0]), 'lng': float(s[1])}"
]
| [
"0.70621926",
"0.68079394",
"0.6600164",
"0.650143",
"0.64311135",
"0.6425078",
"0.6422044",
"0.63975847",
"0.6376725",
"0.6372813",
"0.630666",
"0.6303668",
"0.617006",
"0.61447626",
"0.61388314",
"0.61161673",
"0.61083496",
"0.60994375",
"0.60494965",
"0.6034523",
"0.60241747",
"0.6022874",
"0.5995858",
"0.5955266",
"0.5954517",
"0.59478617",
"0.5942374",
"0.59345806",
"0.59231734",
"0.5914843"
]
| 0.7531564 | 0 |
Return a subset of `returned_resource_set` that contains only resources created by the test suite. | def exclude_foreign_resources(returned_resource_set, expected_resource_set):
expected_owners = {res.owner for res in expected_resource_set}
return [
res for res in returned_resource_set
if res.owner in expected_owners
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def avail(self, time, resource_group):\n a = set()\n for r in self.resource_group.resources:\n pass",
"def get_resources(self):\n res = set()\n res.update(self.get_inputs())\n res.update(self.get_outputs())\n return res",
"def search_resources(self, conditional):\n return list(filter(conditional, self._resources))",
"def get_resources(self):\n return []",
"def GetExclusiveResources(self):\n res = set(self.exclusive_resources)\n if self.parent:\n res |= self.parent.GetExclusiveResources()\n return res",
"def getTestSets():\n return list(_testsetdict.keys())",
"def resources(self, query):\n return _MockResponse()",
"def test_get_deployment_resources(self):\n pass",
"def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass",
"def mock_resource_collection_containing(\n resources: Iterable[message.Message],\n) -> mock.MagicMock:\n mock_collection = mock.MagicMock(spec=fhir_package.ResourceCollection)\n resources = {\n cast(Any, resource).url.value: resource for resource in resources\n }\n\n def mock_get(uri: str) -> message.Message:\n return resources.get(uri)\n\n mock_collection.get.side_effect = mock_get\n\n return mock_collection",
"def test_get_resource_ids(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{self.test_resource}/id/test{i}', headers=admin_headers)\n self._test_paging(f'/v1/resource/{self.test_resource}/ids', admin_headers, 10, 'resource_ids')",
"def test_resource_statefulset(self, integrationtest, k8sconfig):\n config = self.k8sconfig(integrationtest, k8sconfig)\n MM = MetaManifest\n err_resp = (K8sResource(\"\", \"\", \"\", False, \"\"), True)\n\n # Tuples of API version that we ask for (if any), and what the final\n # K8sResource element will contain.\n api_versions = [\n # We expect to get the version we asked for.\n (\"autoscaling/v1\", \"autoscaling/v1\"),\n (\"autoscaling/v2beta1\", \"autoscaling/v2beta1\"),\n (\"autoscaling/v2beta2\", \"autoscaling/v2beta2\"),\n\n # Function must automatically determine the latest version of the resource.\n (\"\", \"autoscaling/v1\"),\n ]\n\n # Convenience.\n kind = \"HorizontalPodAutoscaler\"\n name = kind.lower() + \"s\"\n\n for src, expected in api_versions:\n print(src)\n # A particular StatefulSet in a particular namespace.\n res, err = k8s.resource(config, MM(src, kind, \"ns\", \"name\"))\n assert not err\n assert res == K8sResource(\n apiVersion=expected,\n kind=kind,\n name=name,\n namespaced=True,\n url=f\"{config.url}/apis/{expected}/namespaces/ns/{name}/name\",\n )\n\n # All StatefulSets in all namespaces.\n res, err = k8s.resource(config, MM(src, kind, None, None))\n assert not err\n assert res == K8sResource(\n apiVersion=expected,\n kind=kind,\n name=name,\n namespaced=True,\n url=f\"{config.url}/apis/{expected}/{name}\",\n )\n\n # All StatefulSets in a particular namespace.\n res, err = k8s.resource(config, MM(src, kind, \"ns\", \"\"))\n assert not err\n assert res == K8sResource(\n apiVersion=expected,\n kind=kind,\n name=name,\n namespaced=True,\n url=f\"{config.url}/apis/{expected}/namespaces/ns/{name}\",\n )\n\n # A particular StatefulSet in all namespaces -> Invalid.\n assert k8s.resource(config, MM(src, kind, None, \"name\")) == err_resp",
"def available_sets(session, player):\n excluded_sets = set(session.taken.keys())\n for grouping in session.exclusives:\n if player.sets.intersection(grouping):\n excluded_sets.update(grouping)\n return [s for s in session.sets if s not in excluded_sets]",
"def test_filter_returns_empty_results(admin_client, public_resource_with_metadata):\n # double uuid4 collision incredibly unlikely, especially for the purposes of a pytest\n query_filter = {\"author\": [\"{}\".format(str(uuid.uuid4()))], \"owner\": [\"{}\".format(str(uuid.uuid4()))]}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(json.dumps(query_filter)), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n resources = response['resources']\n assert len(json.loads(resources)) == 0\n assert djangoresponse.status_code == 200",
"def test_get_cloud_resources(self):\n pass",
"def test_get_resource_group_list(self):\n pass",
"async def test_get_all(self):\n await self.collection.create({'id': 'foo', 'token': 'foo:bar'})\n await self.collection.create({'id': 'baz', 'token': 'baz:qux'})\n expected = (\n {'id': 'baz', 'username': 'baz'},\n {'id': 'foo', 'username': 'foo'})\n self.assertEqual(expected, await self.resource.get_all())",
"def getTestSet(self):\r\n return self.fTestData",
"def get_TestServiceDirectResources(test_case, # type: AnyMagpieTestCaseType\n ignore_missing_service=False, # type: bool\n override_service_name=null, # type: Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> List[JSON]\n app_or_url = get_app_or_url(test_case)\n svc_name = override_service_name if override_service_name is not null else test_case.test_service_name\n path = \"/services/{svc}/resources\".format(svc=svc_name)\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies,\n expect_errors=ignore_missing_service)\n if ignore_missing_service and resp.status_code == 404:\n return []\n json_body = get_json_body(resp)\n resources = json_body[svc_name][\"resources\"]\n return [resources[res] for res in resources]",
"def getResources(self):\n\t\treturn deepcopy(self.server.resources)",
"def pseudo_random_subset(resources, portion):\r\n for resource in resources:\r\n #The hashes that the RNG seed function creates are platform dependent\r\n #so 64 bit systems return different random values.\r\n #However, we can get 32 bit system hashes on 64 bit systems by bitmasking the hash.\r\n resource_id_hash = hash(resource.get('_id')) & 0xffffffff\r\n #If we were just trying to match the behavior of python's built-in hash function we\r\n #would need to covert to a signed int, but because the RNG hashes strings to\r\n #unsigned longs don't need to do this:\r\n #http://stackoverflow.com/questions/23260975/how-does-python-2-7-3-hash-strings-used-to-seed-random-number-generators\r\n random_value = random.Random(resource_id_hash).random()\r\n if 10 * (random_value % .1) < portion:\r\n yield resource",
"def test_general_subset_all():\n pass",
"def filtered_context(context):\n\n ctx = Context(context.opt)\n for resource in context.resources():\n if resource.child:\n continue\n\n if resource.filtered():\n ctx.add(resource)\n\n return ctx",
"def test_get_resource_with_added_packages_retrieves_resource(self):\n vs_1 = self._valueset_cls()\n vs_1.url.value = 'vs1'\n\n vs_2 = self._valueset_cls()\n vs_2.url.value = 'vs2'\n\n package_1 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([vs_1]),\n )\n package_2 = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([vs_2]),\n )\n\n manager = fhir_package.FhirPackageManager()\n manager.add_package(package_1)\n manager.add_package(package_2)\n\n self.assertEqual(manager.get_resource('vs1'), vs_1)\n self.assertEqual(manager.get_resource('vs2'), vs_2)\n self.assertIsNone(manager.get_resource('mystery-url'))",
"def empty(self):\r\n return self._resources.empty()",
"def subset(\n self, \n include: Union[Sequence[Any], Any] = None, \n exclude: Union[Sequence[Any], Any] = None) -> Bunch:\n pass",
"def objects_in_use(self):\n return set()",
"def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources",
"def filtered_context(context):\n\n ctx = Context(context.opt)\n for resource in context.resources():\n if resource.child:\n continue\n if resource.filtered():\n ctx.add(resource)\n\n return ctx",
"def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys"
]
| [
"0.5811197",
"0.5749601",
"0.5734158",
"0.57138693",
"0.56029826",
"0.55906695",
"0.55860853",
"0.5571422",
"0.5538224",
"0.5534941",
"0.55199194",
"0.54968446",
"0.548352",
"0.54730636",
"0.5462483",
"0.54619956",
"0.5407742",
"0.5396251",
"0.53668493",
"0.5355272",
"0.5352127",
"0.5330971",
"0.53303",
"0.5303623",
"0.5302515",
"0.53016686",
"0.5299539",
"0.52827644",
"0.5280318",
"0.52739716"
]
| 0.67035204 | 0 |
create an OpticsDescription and make sure it fails if units are missing | def test_construct_optics():
OpticsDescription(
name="test",
size_type=SizeType.LST,
reflector_shape=ReflectorShape.PARABOLIC,
n_mirrors=1,
n_mirror_tiles=100,
mirror_area=u.Quantity(550, u.m**2),
equivalent_focal_length=u.Quantity(10, u.m),
effective_focal_length=u.Quantity(11, u.m),
)
with pytest.raises(TypeError):
# missing units
OpticsDescription(
name="test",
size_type=SizeType.LST,
reflector_shape=ReflectorShape.PARABOLIC,
n_mirrors=1,
n_mirror_tiles=100,
mirror_area=550,
equivalent_focal_length=10,
effective_focal_length=11,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_construct_optics():\n OpticsDescription(\n name=\"test\",\n num_mirrors=1,\n num_mirror_tiles=100,\n mirror_area=u.Quantity(550, u.m ** 2),\n equivalent_focal_length=u.Quantity(10, u.m),\n )\n\n with pytest.raises(TypeError):\n OpticsDescription(\n name=\"test\",\n num_mirrors=1,\n num_mirror_tiles=100,\n mirror_area=550,\n equivalent_focal_length=10,\n )",
"def create_unit(self, unit_type, unit_name, modifiers,\n nb_examples_asked=None):\n new_unit = None\n relevant_dict = None\n if unit_type == pu.UnitType.alias:\n new_unit = AliasDefinition(unit_name, modifiers)\n # new_unit = AliasDefinition(unit_name, [], modifiers.argument_name,\n # modifiers.casegen)\n relevant_dict = self.alias_definitions\n self.stats[\"#aliases\"] += 1\n elif unit_type == pu.UnitType.slot:\n new_unit = SlotDefinition(unit_name, modifiers)\n # new_unit = SlotDefinition(unit_name, [], modifiers.argument_name,\n # modifiers.casegen)\n relevant_dict = self.slot_definitions\n self.stats[\"#slots\"] += 1\n elif unit_type == pu.UnitType.intent:\n new_unit = IntentDefinition(unit_name, modifiers)\n # new_unit = IntentDefinition(unit_name, [], modifiers.argument_name,\n # modifiers.casegen)\n relevant_dict = self.intent_definitions\n self.stats[\"#intents\"] += 1\n\n if unit_type == pu.UnitType.intent and nb_examples_asked is not None:\n (train_nb, test_nb) = nb_examples_asked\n new_unit.set_nb_examples_asked(train_nb, test_nb)\n\n if unit_name not in relevant_dict:\n relevant_dict[unit_name] = new_unit\n elif modifiers.variation_name is None:\n pass # Rules will be added to the already defined unit",
"def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")",
"def test_invalid_units(self):\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, INVALID_UNIT, LENGTH_METERS, VOLUME_LITERS,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, INVALID_UNIT, VOLUME_LITERS,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, LENGTH_METERS, INVALID_UNIT,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS,\n INVALID_UNIT)",
"def test_add_val_desc():\n assert add_val_desc('.Weight', dd, 'WGTP', '0') == None\n\n \"\"\" Test cannot append description to nonexistent value \"\"\"\n v = dd.vars['WGT']\n assert add_val_desc('.Weight', dd, 'WGT', '314') == None\n\n \"\"\" Test can append descriptiont to value and have extraneous space removed \"\"\"\n assert add_val_desc('.Weight', dd, 'WGT', '-9999..09999') == '-9999..09999'\n assert v.vals['-9999..09999'] == 'Integerized Weight'",
"def __init__(self):\n self.drones = ZergUnit(UnitTypeId.DRONE, to_count=0)\n self.lings = ZergUnit(UnitTypeId.ZERGLING, to_count=999)\n self.queens = ZergUnit(UnitTypeId.QUEEN, to_count=3)\n self.roaches = ZergUnit(UnitTypeId.ROACH, to_count=100, priority=True)\n self.ravagers = ZergUnit(UnitTypeId.RAVAGER, to_count=0)\n self.defense_spines = DefensiveBuilding(\n unit_type=UnitTypeId.SPINECRAWLER, position_type=DefensePosition.Entrance, to_base_index=1, to_count=3\n )\n self.gas = StepBuildGas(to_count=3)\n\n unit_building = BuildOrder(\n [\n Step(None, self.drones, skip_until=self.should_build_drones),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.defense_spines),\n Step(\n RequiredAll([UnitExists(UnitTypeId.ROACHWARREN), UnitExists(UnitTypeId.ROACH)]),\n self.ravagers,\n skip_until=self.should_build_ravagers,\n ),\n Step(UnitExists(UnitTypeId.ROACHWARREN), self.roaches),\n Step(\n RequiredAll(\n [\n UnitExists(UnitTypeId.SPAWNINGPOOL),\n UnitExists(\n UnitTypeId.ROACHWARREN,\n include_pending=True,\n include_not_ready=True,\n include_killed=True,\n ),\n ]\n ),\n self.lings,\n ),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.queens),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), self.lings),\n ]\n )\n\n buildings: BuildOrder = BuildOrder(\n [\n Step(None, ActBuilding(UnitTypeId.SPAWNINGPOOL, to_count=1)),\n Step(UnitExists(UnitTypeId.SPAWNINGPOOL), ActBuilding(UnitTypeId.ROACHWARREN, to_count=1)),\n Step(None, self.gas, skip_until=self.should_build_gas),\n ]\n )\n\n super().__init__(buildings, unit_building)",
"def test_optics_from_name(optics_name):\n optics = OpticsDescription.from_name(optics_name)\n assert optics.equivalent_focal_length > 0\n # make sure the string rep gives back the name:\n assert str(optics) == optics_name",
"def create_descr(self, attr_name):",
"def test_creation_when_missing_service_description(self):\n\n self.data = {\n \"service_name\": \"Live at the yard\",\n \"service_price\": \"5000\",\n \"service_description\": \"\",\n \"service_category\": \"Music\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response2.status, \"400 BAD REQUEST\")\n self.assertIn(\"Error. Missing Service Description.\", str(response2.data))",
"def get_description():\n d = dict()\n d['data'] = True\n d['report'] = True\n d['description'] = \"\"\" \"\"\"\n d['arguments'] = [\n dict(type='station', name='station', default='IA2203',\n label='Select Station'),\n ]\n return d",
"def __init__(self, description, itemCode, minUnits, divisor, multiplier, discountPerc, extraData):\n self.description = description\n self.itemCode = itemCode\n self.minUnits = minUnits\n self.divisor = divisor\n self.multiplier = multiplier\n self.discountPerc = discountPerc\n self.extraData = extraData\n self.createRule();",
"def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")",
"def test_empty_description_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Description cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('Project', '')",
"def get_description():\n desc = {\"description\": __doc__}\n desc[\"data\"] = True\n desc[\"arguments\"] = [\n dict(\n type=\"select\",\n options=PDICT3,\n default=\"temps\",\n name=\"v\",\n label=\"Which Variable(s) to Plot\",\n ),\n dict(\n type=\"station\",\n name=\"station1\",\n default=\"IATDSM\",\n label=\"Select First Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"c1\",\n label=\"Climatology Source for First Station:\",\n default=\"1951\",\n options=PDICT,\n ),\n dict(\n type=\"station\",\n name=\"station2\",\n default=\"IATDSM\",\n optional=True,\n label=\"Select Second Station (Optional):\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"c2\",\n label=\"Climatology Source for Second Station:\",\n default=\"1951\",\n options=PDICT,\n ),\n dict(\n type=\"select\",\n name=\"s\",\n label=\"For difference plot, should smoother be applied:\",\n default=\"0\",\n options=PDICT2,\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"sy1\",\n default=1991,\n label=\"Inclusive Start Year for First Station Period of Years:\",\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"ey1\",\n default=2020,\n label=\"Inclusive End Year for First Station Period of Years:\",\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"sy2\",\n default=1981,\n label=\"Inclusive Start Year for Second Station Period of Years:\",\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"ey2\",\n default=2010,\n label=\"Inclusive End Year for Second Station Period of Years:\",\n ),\n ]\n return desc",
"def test_creation(self):\n \n from pystarlab.starlab import Option\n opt = Option(parameter=\"n\",\n long_name=\"specify number of particles\",\n is_required=True,\n default_value=None)\n \n self.assertIsInstance(opt, Option)\n self.assertEquals(opt.parameter, \"n\")\n self.assertTrue(opt.is_required)\n self.assertEquals(opt.long_name, \"specify number of particles\")\n self.assertIsNone(opt.default_value)\n self.assertIsNone(opt.value)",
"def test_createInvalidPortDescription(self):\n store = Store()\n factory = DummyFactory(store=store)\n self.assertFailStatus(\n 1, self._makeConfig(store),\n [\"create\", \"--strport\", \"xyz\",\n \"--factory-identifier\", str(factory.storeID)])\n self.assertEqual(\n \"'xyz' is not a valid port description.\\n\", sys.stdout.getvalue())",
"def createUnit(self):\n return _libsbml.UnitDefinition_createUnit(self)",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"var\",\n default=\"spi\",\n options=PDICT,\n label=\"Select which metric to plot:\",\n ),\n dict(\n type=\"select\",\n name=\"c\",\n default=\"ncei91\",\n options=PDICT2,\n label=\"Which climatology to use for averages:\",\n ),\n dict(\n type=\"int\",\n name=\"days\",\n default=90,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n ]\n return desc",
"def test_unsupported_units(self):\n with pytest.raises(TypeError):\n set_default_units(\"bad\")\n with pytest.raises(TypeError):\n State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"bad\")",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True, \"cache\": 600}\n today = datetime.date.today()\n desc[\"arguments\"] = [\n dict(\n type=\"csector\",\n name=\"csector\",\n default=\"IA\",\n label=\"Select state/sector to plot\",\n ),\n dict(\n type=\"date\",\n name=\"sdate\",\n default=f\"{today.year}/01/01\",\n label=\"Start Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"date\",\n name=\"edate\",\n default=today.strftime(\"%Y/%m/%d\"),\n label=\"End Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"select\",\n name=\"d\",\n default=\"0\",\n options=PDICT,\n label=\"Select Drought Classification (at and above counted):\",\n ),\n dict(\n type=\"select\",\n name=\"w\",\n default=\"percent\",\n options=PDICT2,\n label=\"How to express time for plot:\",\n ),\n dict(type=\"cmap\", name=\"cmap\", default=\"plasma\", label=\"Color Ramp:\"),\n ]\n return desc",
"def get_description():\n desc = {\"description\": __doc__}\n sts = utc() - timedelta(hours=26)\n ets = utc() - timedelta(hours=2)\n desc[\"arguments\"] = [\n {\n \"type\": \"datetime\",\n \"name\": \"sts\",\n \"default\": sts.strftime(\"%Y/%m/%d %H00\"),\n \"label\": \"Start Timestamp (UTC):\",\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"datetime\",\n \"name\": \"ets\",\n \"default\": ets.strftime(\"%Y/%m/%d %H00\"),\n \"label\": (\n \"End Timestamp [inclusive] (UTC), \"\n \"interval must be less than 4 days\"\n ),\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT,\n \"default\": \"min\",\n \"name\": \"w\",\n \"label\": \"Which statistic to compute\",\n },\n {\n \"type\": \"csector\",\n \"name\": \"csector\",\n \"default\": \"IA\",\n \"label\": \"Select state/sector\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT2,\n \"default\": \"user\",\n \"label\": \"Plotting mode (user defined color-ramp or freezing)\",\n \"name\": \"mode\",\n },\n {\n \"type\": \"cmap\",\n \"name\": \"cmap\",\n \"default\": \"gnuplot2\",\n \"label\": \"Color Ramp:\",\n },\n ]\n return desc",
"def test_visualize_equipment(self):\n pass",
"def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\",\n pump_units=\"m3/s\"):\n\n # Set general info\n self._type = 1 # pumping well id\n self.parameters = {'full': True,\n 'rw': 1.,\n 'd': 0.,\n 'l': 1.}\n self.time_units = time_units\n self.len_units = len_units\n self.pump_units = pump_units\n\n # Create pumping well data\n self.pumprate = _Data(dtype=0, name=name, description=description)\n self.pumprate.set_units(self.time_units, self.pump_units)\n\n # Set observation wells and piezometers\n self.wells = []",
"def buildUnits(self, obs, UnitName, Quantity):\n \n \"\"\"drones, overlords, zerglings, roaches, hydralisks, corrupters, queen(may need own function)\"\"\" \n actions.FUNCTIONS.select_larva(\"select\")\n if (UnitName == \"drone\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Drone_quick(\"now\")\n if (UnitName == \"overlord\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Overlord_quick(\"now\")\n if (UnitName == \"zergling\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Zergling_quick(\"now\")\n if (UnitName == \"Roach\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Roach_quick(\"now\")\n if (UnitName == \"hydralisks\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Hydralisk_quick(\"now\")\n if (UnitName == \"corruptor\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Corruptor_quick(\"now\")\n \"\"\"UnitsForControlGroup: [#drone, #zergling, #roaches, #hydralisks]\"\"\"",
"def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\"):\n\n # Set general info\n self._type = 2 # observation well id\n self.time_units = time_units\n self.len_units = len_units\n\n self.parameters = {'full': True, # is full penetrating?\n 'r': 1., # radius, distance until pumping well in length units\n 'd': 0., # depth of well screen (from top) in length units\n 'l': 1.} # depth of well bottom in length units\n\n # Create drawdown data\n self.drawdown = _Data(dtype=1, name=name, description=description)\n self.drawdown.set_units(self.time_units, self.len_units)\n\n # Set results from models\n self.data = []",
"def __init__(self, title=\"\", units=\"\", tunits=\"ns\", ax=None, talk=False):\n super().__init__(ax=ax, talk=talk)\n self.title = title\n self.units = units\n self.tunits = tunits",
"def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n today = datetime.date.today()\n thisyear = today.year\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n options=PDICT,\n name=\"var\",\n default=\"precip\",\n label=\"Accumulate Precipitation or Snow?\",\n ),\n dict(\n type=\"year\",\n name=\"year1\",\n default=thisyear,\n label=\"Additional Year to Plot:\",\n ),\n dict(\n type=\"year\",\n name=\"year2\",\n optional=True,\n default=(thisyear - 1),\n label=\"Additional Year to Plot: (optional)\",\n ),\n dict(\n type=\"year\",\n name=\"year3\",\n optional=True,\n default=(thisyear - 2),\n label=\"Additional Year to Plot: (optional)\",\n ),\n dict(\n type=\"sday\",\n name=\"sdate\",\n default=\"0101\",\n label=\"Start Day of Year for Plot:\",\n ),\n dict(\n optional=True,\n type=\"sday\",\n name=\"edate\",\n default=f\"{today:%m%d}\",\n label=\"End Day of Year for Plot:\",\n ),\n dict(\n type=\"int\",\n default=\"3\",\n label=\"Number of missing days to allow before excluding year\",\n name=\"m\",\n ),\n ]\n return desc",
"def testInvalidDescriptions(self):\n self.assertFalse(self.app._ignore_jobs(\"telecommuting is not an option\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommuting\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommute\"))\n self.assertFalse(self.app._ignore_jobs(\"TELECOMMUTE IS NOT AN OPTION\"))",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000"
]
| [
"0.7196217",
"0.56905293",
"0.55807394",
"0.5456513",
"0.5392586",
"0.538737",
"0.53750855",
"0.5374919",
"0.5374841",
"0.53741705",
"0.5371181",
"0.5356631",
"0.5356039",
"0.5303193",
"0.5301167",
"0.52871114",
"0.5274415",
"0.52718306",
"0.5246364",
"0.52147675",
"0.5199354",
"0.5195291",
"0.51918554",
"0.518769",
"0.51793313",
"0.5153661",
"0.515007",
"0.5149082",
"0.51421773",
"0.51421773"
]
| 0.78008306 | 0 |
Returns the city's weather | def get_weather(self):
return self.__weather | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])",
"def get(self, cityname):\n response = hereService.getWeatherByCity(cityname)\n return response",
"def find_weather(city):\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', \n\t\t\t'http://api.openweathermap.org/data/2.5/weather', \n\t\t\tfields ={\n\t\t\t'q':city, \n\t\t\t'units':'metric', \n\t\t\t\"appid\": \"2bc3e79bb974a007818864813f53fd35\"\n\t\t\t}) \n\t\tparsed_data = json.loads(response.data.decode('utf-8'))\n\t\t\n\t\t\n\t\treturn (\"\\t{}\\t{}\\t{}\").format((parsed_data['name']).ljust(10),(str(parsed_data[\"main\"][\"temp\"])).ljust(10), parsed_data[\"weather\"][0][\"description\"])\n\n\texcept Exception as e:\n\t\tprint (e)",
"def GetWeatherByCity(City):\n WeatherUrl = \"http://api.openweathermap.org/data/2.5/weather?q=\"+ City + \"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\" \n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n if ('main' in WeatherInfo):\n pass\n else:\n print(\"Invalid City Name\")\n exit() \n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n return(Temp, Humidity, Description)",
"def get_weather_data(city):\n url = (f\"https://api.openweathermap.org/data/2.5/weather\" +\n f\"?q={city}&units=imperial&appid={Config.OW_API_KEY}\")\n\n response = requests.get(url)\n \n return response.json()",
"def get(self, city: str):\n # Make a call to the OpenWeatherMap API and check the units inserted at the query parameter.\n units = request.args.get('unit', '').casefold()\n weather_data, query_units = self.get_weather(city, units)\n temp = self.check_unit(query_units)\n\n # Get the date from the request if no date is provided use the current date and time.\n date_raw = request.args.get('at')\n self.timezone = datetime.now().astimezone().tzinfo\n\n if date_raw:\n # Two date formats are allow an aware and naive date. If no time info has been given use the current time.\n try:\n date = isoparse(date_raw.replace(' ', '+'))\n except ValueError:\n now = datetime.now()\n date = datetime.strptime(date_raw, '%Y-%m-%d').replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond,\n tzinfo=self.timezone\n )\n else:\n now = datetime.now()\n date = datetime.now().replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond, tzinfo=self.timezone\n )\n\n # Prepare the error response.\n self.error = {\n 'error': '',\n 'error_code': ''\n }\n\n if self.check_past_date(date):\n return self.error, 400\n\n if type(weather_data) == dict:\n # Based on the date check the index of the weather that corresponds with the date in the weather response.\n index = self.find_index(weather_data, date)\n weather_dict = {\n f'{weather_data[\"list\"][index][\"weather\"][0][\"main\"].lower()}':\n f'{weather_data[\"list\"][index][\"weather\"][0][\"description\"]}',\n 'humidity': f'{weather_data[\"list\"][index][\"main\"][\"humidity\"]}%',\n 'pressure': f'{weather_data[\"list\"][index][\"main\"][\"pressure\"]} hPa',\n 'temperature': f'{str(weather_data[\"list\"][index][\"main\"][\"temp\"]) + temp}',\n }\n return weather_dict, 200\n\n elif '404' in str(weather_data):\n self.error['error'] = f'cannot find the city\"{city}\"'\n self.error['error_code'] = 'city_not_found'\n return self.error, 404\n\n else:\n self.error['error'] = 'Something went wrong'\n self.error['error_code'] = 'internal_server_error'\n return self.error, 500",
"def get_weather(api_key, city):\n url = \"https://api.openweathermap.org/data/2.5/forecast?id={}&appid={}\".format(city, api_key)\n r = requests.get(url)\n return r.json()",
"def Getweather(CITY, API_KEY):\n r = urequests.get(\"http://api.openweathermap.org/data/2.5/weather?q=%s&appid=%s\" % (CITY, API_KEY)).json()\n Data = []\n TemperatureMin = int(r[\"main\"][\"temp_min\"] - 273.15)\n Data.append(\"Min: %s C\" % TemperatureMin)\n TemperatureMax = int(r[\"main\"][\"temp_max\"] - 273.15)\n Data.append(\"Max: %s C\" % TemperatureMax)\n ActualTemp = int(r[\"main\"][\"temp\"] - 273.15)\n Data.append(\"Current: %s C\" % ActualTemp)\n Humidity = int(r[\"main\"][\"humidity\"])\n Data.append(\"Humidity: %s\" % Humidity)\n Condition = str(r[\"weather\"][0][\"description\"])\n Data.append(\"Condition: %s\" % Condition)\n return Data",
"def getLocation(self, state, city, token):\n d = requests.get(\n 'http://api.wunderground.com/api/' + str(token) + '/forecast/q/' + str(state) + '/' + str(city) + '.json')\n json = d.json()\n return json",
"def city(self):\r\n try:\r\n return str(self.connect()['name'])\r\n except:\r\n return '@weather_city'",
"def fetch_weather(city):\n wparams = { 'city': city,\n 'key': WEATHERBIT_API_KEY\n }\n resp = requests.get(WEATHERBIT_API_URL, params=wparams)\n # this works, need to likely raise for status?\n full_weather = json.loads(resp.text)\n print(\"Got full_weather: %s\" % (full_weather))\n if not full_weather['data'][0]['precip']:\n print(\"Precip was None, coercing to 0\")\n full_weather['data'][0]['precip'] = 0\n weather_dict = {\n 'temp': farenheit(full_weather['data'][0]['temp']),\n 'conditions': full_weather['data'][0]['weather']['description'].lower(),\n 'precip': full_weather['data'][0]['precip'],\n 'forecast_temp': avg_based_on_forecast(city)\n }\n print(\"Trimmed down weather_dict: %s\" % (weather_dict))\n return weather_dict",
"def weather_helper():\n\n weather = get_weather('Chicago')\n conditions = weather['weather'][0]['description']\n temperature = weather['main']['temp']\n location = weather['name']\n\n curr_weather = 'It is currently %s degrees with %s in %s' % (temperature, conditions, location)\n return curr_weather",
"def current_weather(city_name, API):\r\n global new_city\r\n try:\r\n if city_name.isnumeric(): # if input is zip\r\n url = f'http://api.openweathermap.org/data/2.5/weather?zip={city_name},&appid={API}'\r\n elif ',' in city_name: # if input has a city,state or city,country\r\n new_city = city_name.split(',')\r\n new_city_name = new_city[0].replace(' ', '%20') # so the url correctly handles spaces in cities\r\n if len(new_city[1]) > 2: # if the state/country code is invalid\r\n return \"Not valid state code/country code\"\r\n url = f'https://api.openweathermap.org/data/2.5/weather?q={new_city_name},{new_city[1]},us&appid={API}'\r\n elif ',' not in city_name: # if searched by only city and not state or country code, works for big cities\r\n url = f'http://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={API}'\r\n response = requests.get(url).json() # getting the proper json data based on the input of the city_name\r\n city_latitude = str(response['coord']['lat'])\r\n city_longitude = str(response['coord']['lon'])\r\n if (new_city[1].upper() in states) and (\r\n response['sys']['country'] != 'US'): # to catch foreign cities with US state codes\r\n return \"Not valid city\"\r\n elif (new_city[1].upper() not in states) and (\r\n new_city[1].upper() != response['sys']['country'] and new_city != 'XXX'):\r\n # to catch US cities with foreign country codes\r\n return 'Not a valid city'\r\n elif states[new_city[1].upper()] != coordinates(city_latitude,\r\n city_longitude):\r\n # Check to see if city is located in provided state\r\n return 'City is not located in that state'\r\n current_temp = response['main']['temp']\r\n max_temp = response['main']['temp_max']\r\n min_temp = response['main']['temp_min']\r\n feels_like_temp = response['main']['feels_like']\r\n curr_temp_fheit = round((current_temp * 1.8) - 459.67) # converting to imperial\r\n max_temp_fheit = round((max_temp * 1.8) - 459.67)\r\n min_temp_fheit = round((min_temp * 1.8) - 459.67)\r\n feels_like_temp_fheit = round((feels_like_temp * 1.8) - 459.67)\r\n description = response['weather'][0]['description']\r\n wind = round(response['wind']['speed'] * 2.23694)\r\n\r\n format_weather = (\"Current weather for \" + str(city_name) + \", \" + response['sys']['country'] +\r\n \"\\nCurrent temp: \" + str(curr_temp_fheit) + '\\nMax Temp: ' + str(\r\n max_temp_fheit) + '\\nMin Temp: ' + str(\r\n min_temp_fheit) + '\\nFeels like: ' + str(\r\n feels_like_temp_fheit) + '\\nOutlook: ' + description + '\\nWind: ' + str(\r\n wind) + ' mph')\r\n # print weather in cleaner format\r\n return format_weather\r\n\r\n except KeyError: # If a city that doesn't exist is entered\r\n return 'Not valid city'",
"def GetWeatherByLocation():\n Location = GetLocation()\n WeatherUrl =\"http://api.openweathermap.org/data/2.5/weather?\"+ Location +\"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\"\n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n pprint(WeatherInfo)\n WindSpeed = WeatherInfo['wind']['speed']\n pprint(WindSpeed)\n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n print(type(Humidity))\n return(Temp, Humidity, Description)",
"def weather_fetch(city, weather_key):\n #Allows for customizable API key and weather location.\n base_url = \"http://api.openweathermap.org/data/2.5/weather?q=\"\n city = str(city)\n key = str(\"&appid=\" + weather_key + \"&units=metric\")\n complete_url = base_url + city + key\n #Gets API with requests and convert to .json\n weather_api = requests.get(complete_url)\n weather_json = weather_api.json()\n return weather_json",
"def get_weather(city: str, units='standard') -> tuple:\n api_key = os.environ.get('API_KEY')\n url = 'https://api.openweathermap.org/data/2.5/forecast?'\n\n try:\n response = requests.get(f'{url}q={city}&APPID={api_key}&units={units}')\n response.raise_for_status()\n return response.json(), units\n except HTTPError as error:\n return error, units",
"def get_weather(self):\n with urllib.request.urlopen(self.url) as response:\n json_data = response.read().decode('utf-8')\n\n data = json.loads(json_data)\n\n weather = {}\n weather['current'] = {\n 'temp': round(data['current']['temp_f']),\n 'humidity': round(data['current']['humidity']),\n 'summary': data['current']['condition']['text']\n }\n today = data['forecast']['forecastday'][0]['day']\n weather['today'] = {\n 'temp': round(today['maxtemp_f']),\n 'summary': today['condition']['text']\n }\n \n return weather",
"def weather(self):\r\n try:\r\n return str(self.connect()['weather'][0]['description'])\r\n except:\r\n return '@weather'",
"def forecast_weather(self):\n pass",
"def weather():\r\n def weather_api_call():\r\n with open('config.json', 'r') as conf:\r\n conf = json.load(conf)\r\n # Gets the API key from the config.json file\r\n weather_api_key = conf[\"weather_api_key\"]\r\n weather_city_name = conf['weather_city_name']\r\n response = requests.get(\r\n 'http://api.openweathermap.org/data/2.5/weather?'\r\n 'q=' + weather_city_name + '&units=metric&appid=' + weather_api_key)\r\n resp_json = response.json()\r\n with open('weather.json', 'w') as outfile:\r\n # Uses the data from the API to overwrite the weather data\r\n json.dump(resp_json, outfile)\r\n outfile.close()\r\n\r\n def weather_data_extractor():\r\n with open('weather.json', 'r') as weather_json:\r\n weather_json = json.load(weather_json)\r\n temp = weather_json[\"main\"]\r\n weather_item = weather_json[\"weather\"]\r\n desc = weather_item[0]\r\n current_temperature = \"The current temperature is: \" + \\\r\n str(int(temp[\"temp\"])) + \"C\"\r\n current_feels_like = \"Feels like: \" + \\\r\n str(int(temp[\"feels_like\"])) + \"C\"\r\n forecast = desc[\"main\"]\r\n return current_feels_like, current_temperature, forecast\r\n\r\n weather_api_call()\r\n return weather_data_extractor()",
"async def weather(url, session, city):\n params = {\"q\": city, \"lang\": \"ru\", \"units\": \"metric\", \"mode\": \"json\"}\n headers = {\n 'x-rapidapi-key': API_KEY,\n 'x-rapidapi-host': \"community-open-weather-map.p.rapidapi.com\"\n }\n response = await session.get(url, params=params, headers=headers)\n if response.status == 200:\n weather_info = await response.json()\n return weather_info\n return False",
"def fetch_weather(y):\r\n # request parameter(s): Start with '?'\r\n # separate name and value with '='\r\n # multiple parameter name value pairs are separate with '&'\r\n query_string = \"?id={}&units=imperial&APIKEY={}\".format(y, API_KEY)\r\n request_url = WS_URL + query_string\r\n print(\"Request URL: \", request_url)\r\n response = requests.get(request_url)\r\n if response.status_code == 200:\r\n city_name = response.json()[\"city\"][\"name\"]\r\n lst = response.json()[\"list\"]\r\n tmp_list = []\r\n for i in range(len(lst) // 8):\r\n li = [x for x in range(len(lst)) if x // 8 == i]\r\n tmp_list.append(max([lst[j][\"main\"][\"temp_max\"] for j in li]))\r\n return City(city_name, tmp_list)\r\n else:\r\n print(\"How should I know?\")\r\n return None",
"def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' % (query)",
"def city():\n\n print(\"Welcome to my weather API\")\n\n yourcity = input(\"Enter A known City name: \")\n api_key = (\"077936f695f61908cd19a5a2452a97fb\") \n #our public Api-key \n response = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q={0}&appid=077936f695f61908cd19a5a2452a97fb\".format(yourcity, api_key)) #api call\n weather = response.json()\n print(\"The current weather in {0} is {1}\".format(yourcity, weather[\"weather\"][0][\"description\"])) #the result",
"async def city_weather(request: web.Request):\n data = await request.post()\n current_city = data[\"city\"]\n return await collect_info(request, city=current_city)",
"def weather_of_wind(city):\n pattern = re.compile(r'.*(\\d+).*')\n\n time_index = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index = dict(time_index.tolist())\n numpy_res = np.empty((len(time_index['index']),))\n with open(exp_data_path + os.sep + 'weather' + os.sep + city + os.sep + '{}_wind.csv'.format(city)) as f:\n reader = csv.reader(f)\n for line in reader:\n if '微' in line[1]:\n line[1] = 0\n else:\n line[1] = pattern.match(line[1]).group(1)\n numpy_res[int(line[0])] = int(line[1])\n\n file_name = exp_data_path + os.sep + 'weather' + os.sep + city + os.sep + '{}_wind'.format(city)\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, numpy_res)\n pass",
"def pull_forecast(city, api_key):\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data",
"def get_weather(html):\n\tcheck_page_type(html)\n\tget_temp(html)\n\tget_table(html)\n\treturn weather_dict",
"def GetCity():\n IPinfoRequest = requests.get('https://ipinfo.io/')\n IPinfo = IPinfoRequest.json()\n City = IPinfo['city']\n return(City)",
"def weather():\n latlong = request.form.get(\"latlong\")\n latlong = latlong.split(\",\")\n data = lookup_weather(latlong[0],latlong[1])\n return render_template(\"weather.html\", data = data)"
]
| [
"0.8158573",
"0.7991035",
"0.78909343",
"0.779052",
"0.7601341",
"0.7493753",
"0.7459171",
"0.74171793",
"0.7396342",
"0.7393161",
"0.7389976",
"0.7368584",
"0.7351321",
"0.73313856",
"0.72969973",
"0.72741586",
"0.7226426",
"0.71553344",
"0.7150858",
"0.70951635",
"0.70761627",
"0.70550096",
"0.7045754",
"0.7034477",
"0.7020682",
"0.6982478",
"0.6952048",
"0.6888609",
"0.68823075",
"0.68740904"
]
| 0.7998161 | 1 |
Format in which the KPOINTS file is written | def write_output(self, user_kps=[]):
if user_kps == []:
kpoints = self.kps
else:
kpoints = user_kps
if min(kpoints) > 0:
fw = open('KPOINTS','w')
fw.write('KPOINTS\n')
fw.write('0\n')
fw.write('Gamma\n')
fw.write('%d %d %d\n' %tuple(kpoints))
fw.write('0 0 0\n')
fw.close()
else:
print("Non-positive k-point!! Refuse to write KPOINTS file!")
exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def KPOINTS(points, header='', dest='.', gamma=True):\n print 'Making KPOINTS file...'\n if gamma:\n center = 'Gamma'\n else:\n center = 'Monkhorst'\n header = str(header)\n \n s = 'Automatic mesh %s' % header\n s += '\\n0' \n s += '\\n%s' % center\n s += '\\n%d %d %d' % (points,points,points)\n s += '\\n0 0 0'\n \n f = open('%s/KPOINTS' % dest, 'w+')\n f.write(s)\n f.close()",
"def remove_z_kpoints():\n\n kpoint_lines = open('KPOINTS').readlines()\n\n twod_kpoints = []\n labels = {}\n i = 4\n\n while i < len(kpoint_lines):\n kpt_1 = kpoint_lines[i].split()\n kpt_2 = kpoint_lines[i+1].split()\n if float(kpt_1[2]) == 0.0 and [float(kpt_1[0]),\n float(kpt_1[1])] not in twod_kpoints:\n twod_kpoints.append([float(kpt_1[0]), float(kpt_1[1])])\n labels[kpt_1[4]] = [float(kpt_1[0]), float(kpt_1[1])]\n\n if float(kpt_2[2]) == 0.0 and [float(kpt_2[0]),\n float(kpt_2[1])] not in twod_kpoints:\n twod_kpoints.append([float(kpt_2[0]), float(kpt_2[1])])\n labels[kpt_2[4]] = [float(kpt_2[0]), float(kpt_2[1])]\n i += 3\n\n kpath = get_markovian_path(twod_kpoints)\n\n with open('KPOINTS', 'w') as kpts:\n for line in kpoint_lines[:4]:\n kpts.write(line)\n\n for i in range(len(kpath)):\n label_1 = [l for l in labels if labels[l] == kpath[i]][0]\n if i == len(kpath) - 1:\n kpt_2 = kpath[0]\n label_2 = [l for l in labels if labels[l] == kpath[0]][0]\n else:\n kpt_2 = kpath[i+1]\n label_2 = [l for l in labels if labels[l] == kpath[i+1]][0]\n\n kpts.write(' '.join([str(kpath[i][0]), str(kpath[i][1]), '0.0 !',\n label_1]))\n kpts.write('\\n')\n kpts.write(' '.join([str(kpt_2[0]), str(kpt_2[1]), '0.0 !',\n label_2]))\n kpts.write('\\n\\n')",
"def write_spec_points_file(spectre_points_filename, spec_points_filename):\n spectre_file = h5py.File(spectre_points_filename, 'r')\n points = get_spec_points(spectre_file)\n spectre_file.close()\n np.savetxt(spec_points_filename, points)",
"def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):",
"def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')",
"def output_file(data, k):\n if k:\n filename = 'digitsOutput' + str(k) + '.csv'\n else:\n filename = 'digitsOutput.csv'\n f = open(filename, 'w')\n for line in data:\n f.write(str(line) + '\\n')\n f.close()",
"def writePointCloudVTP(self, outFile):\n #points\n vtkPts = vtk.vtkPoints()\n cells = vtk.vtkCellArray()\n\n # setup colors\n Colors = vtk.vtkFloatArray()\n #Colors.SetNumberOfComponents(3)\n Colors.SetNumberOfTuples(self.Npts)\n Colors.SetName(self.label) #can change to any string\n\n for i in range(self.Npts):\n x = self.ctrs[i,0]\n y = self.ctrs[i,1]\n z = self.ctrs[i,2]\n id = vtkPts.InsertNextPoint(x,y,z)\n cells.InsertNextCell(1)\n cells.InsertCellPoint(id)\n Colors.InsertTuple( i, [self.scalar[i]] )\n\n\n #build final vtp object for writing\n polydata = vtk.vtkPolyData()\n polydata.SetPoints(vtkPts)\n polydata.SetVerts(cells)\n polydata.GetPointData().SetScalars(Colors)\n polydata.Modified()\n\n writer = vtk.vtkXMLPolyDataWriter()\n writer.DebugOn()\n writer.SetFileName(outFile)\n writer.SetInputData(polydata)\n #writer.SetDataModeToBinary()\n writer.Write()\n\n return",
"def test_kpoints_write_line(kpoints_parser_line, tmpdir):\n\n kpoints = kpoints_parser_line.get_dict()\n temp_file = str(tmpdir.join('KPOINTSLINE'))\n kpoints_parser_line.write(file_path=temp_file)\n kpoints_parser_line_temp = Kpoints(file_path=temp_file)\n kpoints_temp = kpoints_parser_line_temp.get_dict()\n assert kpoints_temp['mode'] == 'line'\n assert kpoints_temp['comment'] == 'k-points along high symmetry lines'\n assert kpoints_temp['divisions'] == None\n assert kpoints_temp['shifts'] == None\n assert kpoints_temp['centering'] == None\n assert kpoints_temp['num_kpoints'] == 40\n points = kpoints_temp['points']\n np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0]))\n np.testing.assert_allclose(points[1][0], np.array([0.5, 0.5, 0.0]))\n np.testing.assert_allclose(points[2][0], np.array([0.5, 0.5, 0.0]))\n np.testing.assert_allclose(points[3][0], np.array([0.5, 0.75, 0.25]))\n np.testing.assert_allclose(points[4][0], np.array([0.5, 0.75, 0.25]))\n np.testing.assert_allclose(points[5][0], np.array([0.0, 0.0, 0.0]))\n assert math.isclose(points[0][1], 1.0, rel_tol=1e-07)\n assert math.isclose(points[1][1], 1.0, rel_tol=1e-07)\n assert math.isclose(points[2][1], 1.0, rel_tol=1e-07)\n assert math.isclose(points[3][1], 1.0, rel_tol=1e-07)\n assert math.isclose(points[4][1], 1.0, rel_tol=1e-07)\n assert math.isclose(points[5][1], 1.0, rel_tol=1e-07)\n assert points[0][2]\n assert points[1][2]\n assert points[2][2]\n assert points[3][2]\n assert points[4][2]\n assert points[5][2]",
"def test_kpoints_write_auto(kpoints_parser_auto, tmpdir):\n\n kpoints = kpoints_parser_auto.get_dict()\n temp_file = str(tmpdir.join('KPOINTS'))\n kpoints_parser_auto.write(file_path=temp_file)\n kpoints_parser_auto_temp = Kpoints(file_path=temp_file)\n kpoints_temp = kpoints_parser_auto_temp.get_dict()\n verify_kpoints_content(kpoints_temp)\n with open(temp_file, 'w') as handler:\n kpoints_parser_auto.write(file_handler=handler)\n with open(temp_file, 'r') as handler:\n kpoints_parser_auto_temp = Kpoints(file_handler=handler)\n kpoints_temp = kpoints_parser_auto_temp.get_dict()\n verify_kpoints_content(kpoints_temp)",
"def dump(points, filename):\n with open(filename, 'w') as f:\n for i, pts in enumerate(points):\n for x, y in pts:\n f.write(f\"{x:.3f},{y:.3f},{i}\\n\")\n print(f\"Dumping data to {filename}...\")",
"def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))",
"def write(self, stable_poses, min_prob=0):\n R_list = []\n for pose in stable_poses:\n if pose.p >= min_prob:\n R_list.append([pose.p, pose.r, pose.x0])\n\n f = open(self.filepath_[:-4] + \".stp\", \"w\")\n f.write(\"#############################################################\\n\")\n f.write(\"# STP file generated by UC Berkeley Automation Sciences Lab #\\n\")\n f.write(\"# #\\n\")\n f.write(\"# Num Poses: %d\" %len(R_list))\n for _ in range(46 - len(str(len(R_list)))):\n f.write(\" \")\n f.write(\" #\\n\")\n f.write(\"# Min Probability: %s\" %str(min_prob))\n for _ in range(40 - len(str(min_prob))):\n f.write(\" \")\n f.write(\" #\\n\")\n f.write(\"# #\\n\")\n f.write(\"#############################################################\\n\")\n f.write(\"\\n\")\n\n # adding R matrices to .stp file\n pose_index = 1\n for i in range(len(R_list)):\n f.write(\"p %f\\n\" %R_list[i][0])\n f.write(\"r %f %f %f\\n\" %(R_list[i][1][0][0], R_list[i][1][0][1], R_list[i][1][0][2]))\n f.write(\" %f %f %f\\n\" %(R_list[i][1][1][0], R_list[i][1][1][1], R_list[i][1][1][2]))\n f.write(\" %f %f %f\\n\" %(R_list[i][1][2][0], R_list[i][1][2][1], R_list[i][1][2][2]))\n f.write(\"x0 %f %f %f\\n\" %(R_list[i][2][0], R_list[i][2][1], R_list[i][2][2]))\n f.write(\"\\n\\n\")\n f.close()",
"def edit_kpts(param_label, i, dir, line_key = 'Gamma', file = 'KPOINTS'):\n\n replacement_line = \" \" + i[0] + \" \" + i[1] + \" \" + i[2]\n gen_file_editor(param_label, dir, file, replacement_line, line_key)\n\n return False",
"def delta_kpoints(self):\n self.get_kpoints('no')\n ori_kp = self.kps\n omk = min(ori_kp)\n nmk = omk+self.diff\n self.kps = [v*nmk/omk for v in ori_kp]\n self.write_output()",
"def write_input(eval_points, filename='input.txt'):\n util.save(eval_points, filename)",
"def read_kpoints(data_file_path, units='crystal'):\n data_file_xml = ET.parse(data_file_path)\n alat = read_lattice_parameter(data_file_path)\n rlv = read_rlv(data_file_path)\n nkpts = int(data_file_xml.find('./BRILLOUIN_ZONE/NUMBER_OF_K-POINTS').text)\n\n kpoints = np.zeros((nkpts, 3))\n for ikpt in range(nkpts):\n kpoints[ikpt] = np.fromstring(data_file_xml.find('./BRILLOUIN_ZONE/K-POINT.%d' % (ikpt+1)).attrib['XYZ'], sep=' ')\n\n if units == 'crystal':\n kpoints = 2 * np.pi / alat * np.dot(kpoints, np.linalg.inv(rlv))\n elif units == 'cartesian':\n kpoints *= 2 * np.pi / alat\n else:\n raise ValueError('units must be \"crystal\" or \"cartesian\"')\n\n return kpoints",
"def test_kpoints_string(tmpdir):\n\n kpoints_str = '# Example file\\n0\\nG\\n4 4 4\\n'\n temp_file = str(tmpdir.join('KPOINTS'))\n kpoints_parser_auto_temp = Kpoints(kpoints_string=kpoints_str)\n kpoints_temp = kpoints_parser_auto_temp.get_dict()\n assert kpoints_temp['mode'] == 'automatic'\n assert kpoints_temp['comment'] == 'Example file'\n assert kpoints_temp['divisions'] == [4, 4, 4]\n assert kpoints_temp['shifts'] == None\n assert kpoints_temp['points'] == None\n assert kpoints_temp['centering'] == 'Gamma'\n assert kpoints_temp['tetra'] == None\n assert kpoints_temp['tetra_volume'] == None\n assert kpoints_temp['num_kpoints'] == 0",
"def verify_kpoints_content(kpoints):\n assert kpoints['mode'] == 'automatic'\n assert kpoints['comment'] == 'Example file'\n assert kpoints['divisions'] == [4, 4, 4]\n assert kpoints['shifts'] == [0.0, 0.0, 0.0]\n assert kpoints['points'] == None\n assert kpoints['centering'] == 'Gamma'\n assert kpoints['tetra'] == None\n assert kpoints['tetra_volume'] == None\n assert kpoints['num_kpoints'] == 0",
"def dump(self, data_points):\n print(data_points)",
"def get_kpoints(self,ifwrite='yes'):\n a11 = float(self.lat[2].split()[0])\n a12 = float(self.lat[2].split()[1])\n a13 = float(self.lat[2].split()[2])\n a21 = float(self.lat[3].split()[0])\n a22 = float(self.lat[3].split()[1])\n a23 = float(self.lat[3].split()[2])\n a31 = float(self.lat[4].split()[0])\n a32 = float(self.lat[4].split()[1])\n a33 = float(self.lat[4].split()[2])\n \n x0 = [a11, a12, a13]\n x1 = [a21, a22, a23]\n x2 = [a31, a32, a33]\n \n self.natom = sum(list(map(int,self.lat[6].split())))\n # Number of atoms in POSCAR/CONTCAR\n \n l0 = np.linalg.norm(x0)\n l1 = np.linalg.norm(x1)\n l2 = np.linalg.norm(x2)\n\n self.cell_norm = [l0, l1, l2]\n \n N = (l0*l1*l2*self.kppra/self.natom)**(1.0/3.0)\n \n k0 = int(N/l0)\n k1 = int(N/l1)\n k2 = int(N/l2)\n\n klist = [k0,k1,k2]\n flag = 0\n kn = klist[:]\n\n if len(set(klist)) == 1:\n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n kn = [v+1 for v in kn]\n elif len(set(klist)) == 3:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 3:\n kn[klist.index(sorted(klist)[flag])] += 1\n flag += 1\n else:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 2:\n tmp = sorted(set(klist))[flag]\n tmp_ind = []\n for i in range(3):\n if klist[i] == tmp:\n tmp_ind.append(i)\n kn = [kn[i]+1 if i in tmp_ind else kn[i] for i in range(3)]\n flag += 1\n\n self.kps = kn\n \n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n print(\"===== WARNING =====\")\n print(\"K-points generate method may not be appropriate!\")\n print(\"Check source code!!!!\")\n print(\"===================\")\n exit()\n\n #if ifwrite == 'yes':\n # self.write_output()",
"def saveCalibrationPoints(self):\n\n if self.kinectCalibrated == True:\n with open('cali_points.csv', 'wb') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n for row in range(5):\n csvwriter.writerow(self.rgb_click_points[row])\n for row in range(5): \n csvwriter.writerow(self.depth_click_points[row])\n pass",
"def convert_to_kbest_format(infname,outfname,k_str):\n k=int(k_str)\n with codecs.open(outfname,'w','utf-8') as outfile:\n for sent_no, parsed_lines in iterate_nbest_list(infname): \n for i in xrange(0,k): \n outfile.write( u'{} ||| {} ||| {} ||| {}\\n'.format( *parsed_lines[i] ) )",
"def test_kpoints_dict(tmpdir):\n\n kpoints_dict = {\n 'comment': 'Example file',\n 'divisions': [5, 5, 5],\n 'mode': 'automatic',\n 'shifts': None,\n 'points': None,\n 'centering': 'Gamma',\n 'tetra': None,\n 'tetra_volume': None,\n 'num_kpoints': 0,\n 'generating_vectors': None\n }\n temp_file = str(tmpdir.join('KPOINTS'))\n kpoints_parser_auto_temp = Kpoints(kpoints_dict=kpoints_dict)\n kpoints_temp = kpoints_parser_auto_temp.get_dict()\n assert kpoints_temp['mode'] == 'automatic'\n assert kpoints_temp['comment'] == 'Example file'\n assert kpoints_temp['divisions'] == [5, 5, 5]\n assert kpoints_temp['shifts'] == None\n assert kpoints_temp['points'] == None\n assert kpoints_temp['centering'] == 'Gamma'\n assert kpoints_temp['tetra'] == None\n assert kpoints_temp['tetra_volume'] == None\n assert kpoints_temp['num_kpoints'] == 0",
"def test_kpoints_params_auto_file_object(kpoints_parser_auto_file_object):\n\n kpoints = kpoints_parser_auto_file_object.get_dict()\n assert kpoints['mode'] == 'automatic'\n assert kpoints['comment'] == 'Example file'\n assert kpoints['divisions'] == [4, 4, 4]\n assert kpoints['shifts'] == None\n assert kpoints['points'] == None\n assert kpoints['centering'] == 'Gamma'\n assert kpoints['tetra'] == None\n assert kpoints['tetra_volume'] == None\n assert kpoints['num_kpoints'] == 0",
"def kpoints_parser_GRG():\n\n testdir = os.path.dirname(__file__)\n kpointsfile = testdir + '/KPOINTSGRG'\n kpoints = Kpoints(file_path=kpointsfile)\n\n return kpoints",
"def test_write_OPK_to_shp_file(self):\r\n arr_oris = [{'altitude': 53.534337, 'id': 'IMG_1468832894.185000000.jpg', 'easting': 657739.197431,\r\n 'pitch': -172.350586, 'heading': -75.622522, 'roll': -40.654833, 'northing': 6860690.284637}]\r\n\r\n # on export le shapefile a partir des donnees pour le tests\r\n write_OPK_to_shp_file(arr_oris,\r\n self.test_shapefile,\r\n b_export_view_dir=False)\r\n # on tests si la methode a exporte les fichiers\r\n # url: http://stackoverflow.com/questions/82831/how-to-check-whether-a-file-exists-using-python\r\n self.assertTrue(exists(self.test_shapefile))\r\n\r\n # lecture d'un shapefile\r\n r = shapefile.Reader(self.test_shapefile)\r\n # geometries\r\n shapes = r.shapes()\r\n # extraction de la listes des points\r\n list_points = shapes[0].points\r\n # 1 point definit dans le shapefile\r\n self.assertEqual(len(shapes), 1)\r\n # on tests le type de la shape stockee\r\n # url: http://www.esri.com/library/whitepapers/pdfs/shapefile.pdf\r\n # type == 1 => Shape type=Point\r\n self.assertEqual(shapes[0].shapeType, 1)\r\n # on utilise extract_center_dict_ori (qui est doctestee)\r\n self._raise_assert_on_np_is_close_all(list_points[0], extract_center_dict_ori(arr_oris[0])[:2])",
"def _get_kps_ann_file(self):\n prefix = 'person_keypoints' if 'test' not in self.image_set else 'image_info'\n return os.path.join(self.data_path, 'annotations',\n prefix + '_' + self.image_set + '.json')",
"def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)",
"def write_kpi_indices(dst_file):\n global kpi_list\n with open(dst_file, 'w') as f:\n for kpi in kpi_list:\n f.write(kpi.desc() + '\\n')",
"def export_kmz(self):\n self.export_kml(kmz=True)"
]
| [
"0.7680949",
"0.64386225",
"0.62907845",
"0.62823975",
"0.62431276",
"0.6231152",
"0.6125255",
"0.612231",
"0.6043202",
"0.6021002",
"0.59331673",
"0.5904438",
"0.5876673",
"0.58739465",
"0.5865406",
"0.58551",
"0.5840665",
"0.58353406",
"0.5817526",
"0.5816389",
"0.58064806",
"0.5803403",
"0.579115",
"0.57585293",
"0.5739622",
"0.5718584",
"0.5699711",
"0.56821847",
"0.56780547",
"0.5657684"
]
| 0.76395744 | 1 |
This function controls gameplay by iterating through the number of rounds and calling the perform_round function for each. perform_round returns the player to be removed for that round, and the player-removal routine is also called from this function. | def play_game(lst):
number_of_rounds=lst.size-1
for round in range(1,number_of_rounds+1):
number_of_passes=random.randint(-(2*lst.size),(2*lst.size))
remove_player(lst, perform_round(lst,number_of_passes))
cursor=lst.head
print(cursor.data,"is the winner!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def end_round(self) -> None:\r\n self.collect_money()\r\n self.round_num += 1\r\n self.round_player_money = 0\r\n for player in self.players:\r\n player.set_already_raised(False)",
"def play_round(self):\n print('='*10) # Round separation display\n print(f'Round {self.round_num}:')\n for player in self.players:\n\n # Player separation display:\n if player != self.players[0]:\n print('-' * 5)\n\n self.play_turn(player)\n \n # Return if exit conditions are met\n if (self.exit_flag) or (self.winner is not None) or (self.board.full()):\n return\n self.round_num += 1",
"def round(self):\n #player turn\n if self.started:\n self.started = False #registers the game as started then immediately turns that value false\n if self.initial_action:\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n else: \n if self.apply_policy():\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n\n #dealer turn\n if self.dealer.value < 17:\n card = self.deck.deal()\n self.dealer.value += card.value\n self.dealer.visible_value += card.value\n #allow people to reduce their scores by applying aces\n self.apply_ace()\n #check to see if anyone has bust by making bust people not _playing\n if self.player.value > 21:\n self.player.broke = True\n self.player.playing = False\n if self.dealer.value > 21:\n self.dealer.broke = True",
"def play_round(players):\n\n # start off playing a normal round\n play_round = play_normal_round\n\n # we shuffle the cards at the begining of every round?\n for player in players:\n shuffle(player.deck)\n\n # while we dont have a winner, and there are still\n # cards to play, keep playing\n while not winning_round_player(players) and players_with_decks(players):\n # go through all the players who still have cards\n # in their deck\n for player in players_with_decks(players):\n try:\n play_round(player)\n except IndexError:\n # one of the players has run out of cards\n # in their deck\n pass\n\n # it's WAR !\n play_round = play_war_round\n\n # did we actually have a winner this round\n if players_with_decks(players):\n # we now have a winner\n # all cards from play go to the winner\n round_winner = winning_round_player(players)\n hand_over_cards(round_winner, players)\n\n return players",
"def play_round(self) -> None:\r\n # Print round details:\r\n self.open_cards()\r\n print(Messages.OPEN_CARDS)\r\n print(self.card_stack)\r\n print(Messages.MONEY_IN_STACK + \" \", end=\"\")\r\n print(self.money_stack)\r\n\r\n start_player = self.cur_player # Helper for the round to end correctly.\r\n\r\n # The actual round\r\n while self.continue_round(start_player):\r\n if self.active_players == 1:\r\n break\r\n self.round_player_money += self.players[self.cur_player].play_round(self.round_player_money)\r\n if not self.players[self.cur_player].is_active():\r\n self.active_players -= 1\r\n self.next_active_player()\r\n self.players[start_player].set_already_raised(True) # Helper for the round to end correctly.\r\n self.end_round()",
"def play_normal_round(player):\n player.cards_in_play.appendleft(player.deck.popleft())\n return player",
"def round_loop(list_of_players, score_chart):\n rounds = 0\n while True:\n rounds += 1\n initialize_round(list_of_players)\n inform_players(list_of_players)\n loop_until_thalom(list_of_players)\n give_round_scores(list_of_players)\n score_chart.update_scores(rounds, list_of_players)\n # score_chart.compute_totals_scores()\n if not continue_playing():\n break",
"def play_round(self):\r\n move1 = self.p1.move()\r\n move2 = self.p2.move()\r\n # Checks if User Wants to Quit Game:\r\n if move1 == \"quit\" or move2 == \"quit\":\r\n self.game_over(True)\r\n print(f\"Player One: {move1.upper()}\\nPlayer Two: {move2.upper()}\")\r\n self.keep_score(move1, move2)\r\n self.p1.learn(move1, move2)\r\n self.p2.learn(move2, move1)",
"def run_turn(self):\n\n all_placed = self.state.all_avatars_placed()\n color = self.__get_next_turn(all_placed)\n if color is None:\n return\n\n if not all_placed:\n # placement round\n func = self.players[color].make_placement\n else:\n # movement round\n func = self.players[color].make_move\n\n queue = Queue()\n thread = Thread(target=self.__player_thread, args=[func, deepcopy(self.state), queue])\n thread.daemon = True\n thread.start()\n thread.join(self.timeout)\n if thread.is_alive():\n #print(\"The \" + str(color) + \" player timed out and will be removed.\")\n self.__remove_player(color)\n return\n\n action = queue.get()\n if action == None:\n #print(\"The \" + str(color) + \" player crashed and will be removed.\")\n self.__remove_player(color)\n return\n\n if not all_placed:\n if self.state.valid_placement(action, color):\n self.state.place_avatar(action, color)\n else:\n #print(\"The \" + str(color) + \" player has attempted an invalid placement and will be removed.\")\n self.__remove_player(color)\n else:\n if self.state.valid_move(*action):\n self.state.move_avatar(*action)\n else:\n #print(\"The \" + str(color) + \" player has attempted an invalid move and will be removed.\")\n self.__remove_player(color)",
"def play(self):\n if self.rounds == 0:\n # When the game has not begun yet, the game must\n # give the players their pieces and a corner to start.\n max_x = ((self.board).size[1] - 1)\n max_y = ((self.board).size[0] - 1)\n starts = [(0, 0), (max_y, max_x), (0, max_x), (max_y, 0)]\n\n for i in xrange(len(self.players)):\n (self.players[i]).add_pieces(self.all_pieces)\n (self.players[i]).start_corner(starts[i])\n\n # if there is no winner, print out the current player's name and\n # let current player perform a move\n if self.winner() == \"None\":\n current = self.players[0]\n print\n \"Current player: \" + current.name\n proposal = current.do_move(self)\n if proposal == None:\n # move on to next player, increment rounds\n first = (self.players).pop(0)\n self.players = self.players + [first]\n self.rounds += 1\n\n\n # ensure that the proposed move is valid\n elif self.valid_move(current, proposal.points):\n # update the board with the move\n (self.board).update(current, proposal.points)\n # let the player update itself accordingly\n current.update_player(proposal, self.board)\n # remove the piece that was played from the player\n current.remove_piece(proposal)\n # place the player at the back of the queue\n first = (self.players).pop(0)\n self.players = self.players + [first]\n # increment the number of rounds just played\n self.rounds += 1\n\n # interrupts the game if an invalid move is proposed\n else:\n raise Exception(\"Invalid move by \" + current.name + \".\")\n\n else:\n print\n \"Game over! And the winner is: \" + self.winner()",
"def play_one_round(self, verbose=False):\n\t\tif verbose:\n\t\t\tprint('\\n--- Round {} ---'.format(self.round_count+1))\n\t\tself.count_one_round()\n\t\tplayer1_move=self.player1.make_move()\n\t\tplayer2_move=self.player2.make_move()\n\t\tround=Round(self.player1.name, self.player2.name, self.player1.move, self.player2.move)\n\t\tresult, winner=round.calculate_result()\n\t\tself.winner_dict[self.round_count]=winner\n\t\tif verbose:\n\t\t\tprint(player1_move)\n\t\t\tprint(player2_move)\n\t\t\tprint(result)",
"def computer_plays():\n global piles\n global num_piles\n\n print('Your move was MEDIOCRE at best MY TURN!!!!')\n opt = opt_play()\n print('I shall remove '+str(opt[1])+' from pile '+str(opt[0]))\n piles[opt[0]] -= opt[1]",
"def play_game(self):\n # print(\"Playing a random game!\")\n for round_num in range(1, self.rounds_to_play + 1):\n # print(\"Play Round No. {}\".format(round_num))\n round = Round(round_num, self.players)\n score = round.play_round()\n # print(len(round.played_cards))\n for i in range(self.num_players):\n self.scores[i] += score[i]\n # print(\"Scores: {}\".format(self.scores))\n # print(\"Final scores: {}\".format(self.scores))\n for player in self.players:\n player.reset_score()\n return self.scores",
"def play_game(self):\r\n try: # Asks user how many rounds they want to play:\r\n game_rounds = int(input(\r\n \"Please enter the desired number of rounds to play: \"\r\n ))\r\n except ValueError: # Ensures input value is correct\r\n print(\"Sorry, I didn't quite catch that.\\nPlease try again,\"\r\n \" and make sure you enter a valid number.\\n\")\r\n return self.play_game()\r\n # Game Starts:\r\n print(\"\\nGame start!\\n\")\r\n for round in range(game_rounds):\r\n print(f\"ROUND {round}:\")\r\n self.play_round()\r\n self.game_over() # Game concludes naturally.\r",
"def play_round(user, dealer, deck):\n start_round(user=user, dealer=dealer, deck=deck)\n dealer_turn(dealer, deck)\n if dealer.total <= GOAL_TOTAL():\n player_turn(user, deck)",
"def end_round(game_state: gs.GameState) -> None:\n # clear auction tiles\n game_state.clear_auction_tiles()\n\n # clear auction suns and mark auction as over (in case it was started)\n game_state.end_auction()\n\n # reset num ras in current round\n game_state.reset_num_ras_this_round()\n\n # do round scoring each player\n scoring_utils.base_round_scoring(game_state.player_states)\n\n for player_state in game_state.player_states:\n # remove temporary tiles from each player\n player_state.remove_all_tiles_by_index(\n gi.list_of_temporary_collectible_indexes()\n )\n\n # reset usability of the suns\n player_state.make_all_suns_usable()\n\n if game_state.is_final_round():\n # if final round, do final scoring\n scoring_utils.final_round_scoring(game_state.player_states)\n\n # mark that the game has ended\n game_state.set_game_ended()\n\n return\n\n # reset passed players\n game_state.reset_active_players()\n\n # advance start player to the next player\n game_state.advance_current_player()\n\n # advance round number\n game_state.increase_round_number()\n\n return",
"def run_game(self):\n game = Poker()\n AI_win = game.play_round(self.name)\n self.update_scores(AI_win)\n message = 'Would you like to play another round? Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()",
"def play_round(self, money_each_player: int) -> int:\r\n # If all of the balance already been paid.\r\n if self.balance.get_money() == 0:\r\n self.do_call(money_each_player)\r\n return 0\r\n print(Messages.ITS_YOUR_TURN.format(self.name, self.balance, self.round_money))\r\n print(self.cards_stack)\r\n next_move = self.next_move(money_each_player)\r\n raised_by = 0\r\n if next_move == \"C\":\r\n self.do_call(money_each_player)\r\n elif next_move == \"R\":\r\n raised_by = self.do_raise(money_each_player)\r\n elif next_move == \"F\":\r\n self.do_fold()\r\n return raised_by",
"def next_round(self):\n if self.finish_game == 3:\n self.restart_game()\n return\n\n atual_color = self.atual_player.color\n if self.board.valid_moves(atual_color).__len__() > 0:\n self.board.play(self.atual_player.play(self.board.get_clone()), atual_color)\n self.view.atualizar_discos()\n self.finish_game = 0\n else:\n self.finish_game += 1\n self.atual_player = self._opponent(self.atual_player)\n\n self.view.atualizar_jogador_atual(self.atual_player.color)\n\n if self.finish_game == 2:\n self._end_game()",
"def play_round(self):\n move1 = self.p1.move()\n move2 = self.p2.move()\n print(f\"P1: {move1} P2: {move2}\")\n self.p1.learn(move1, move2)\n self.p2.learn(move2, move1)\n \"\"\"Proneround_score and ptworound_score resets\n to 0 at beginning of every round.\"\"\"\n poneround_score = 0\n ptworound_score = 0\n if self.beats(move1, move2):\n print(\"Player 1 Wins This Round\")\n poneround_score = 1\n self.pone_score += 1\n elif self.beats(move2, move1):\n print(\"Player 2 Wins This Round\")\n ptworound_score = 1\n self.ptwo_score += 1\n else:\n print(\"Tie! No Points.\")\n print(f\"Round Points - P1: {poneround_score} | P2: {ptworound_score}\")",
"def play_round(starter, cards):\n r = Round(starter)\n for who, card in cards:\n try:\n r.play(who, card)\n except AssertionError as e:\n print(e)\n return Round.winners",
"def beginRound(self):\n\t\tself.gameState = Table.PRE_FLOP\n\t\tfor p in self.getPlayers():\n\t\t\tif p.money <= 0:\n\t\t\t\tprint p.name\n\t\t\t\tself.playerRemoveList.append(p)\n\t\tself.removeFromPlayerList()\n\t\tif len(self.getPlayers()) == 1:\n\t\t\tself.isGameEnd = True\n\t\telse:\n\t\t\tself.roundNo += 1\n\t\t\tself.determineBlinds()\n\t\t\tself.curRaise = self.bigBlind\n\t\t\tself.collectSmallBlind()\n\t\t\tself.collectBigBlind()\n\t\t\tself.deal()\n\t\t\tself.setState()\n\t\t\tif self.noOfPlayers() == 2:\n\t\t\t\tself.turn = self.curDealerSeatNo\n\t\t\t\t_, self.roundEndSeat = self.findNthPlayerFromSeat(self.turn, 1)\n\t\t\telse:\n\t\t\t\t_, self.turn = self.findNthPlayerFromSeat(self.curDealerSeatNo, 3)\n\t\t\t\t_, self.roundEndSeat = self.findNthPlayerFromSeat(self.curDealerSeatNo, 2)",
"def play_poker(self) -> None:\r\n self.deal_opening_cards()\r\n for i in range(PokerRules.NUM_OF_ROUNDS):\r\n if self.active_players == 1:\r\n break\r\n self.play_round()\r\n PokerRules.winner(self.card_stack, self.money_stack, self.players)",
"def play_war_round(player):\n for i in xrange(4):\n play_normal_round(player)\n\n return player",
"def play_round(self):\r\n your_move = self.you.move()\r\n opposite_move = self.opposite.move()\r\n result = Game.what_move(your_move, opposite_move)\r\n\r\n self.you.learn(opposite_move)\r\n self.opposite.learn(your_move)\r\n\r\n print(\"you choose:\" + your_move + \" and the opposite player choose:\" +\r\n opposite_move)\r\n\r\n if result == 1:\r\n self.you.score += 1\r\n print('=> you won this round!\\n')\r\n elif result == 2:\r\n self.opposite.score += 1\r\n print('=> the opposite pleyer won this round!\\n')\r\n elif result == 0:\r\n print('=> it is Draw!\\n')",
"def play_one_round(self):\r\n new_solutions = self.breeder.breed(self.solutions)\r\n self.solutions.clear()\r\n self.solutions.extend(new_solutions)\r\n self.mutation_maker.mutate(self.solutions)\r\n self.round += 1\r\n self.simulation_stats.add_stats(self.round, self.solutions)\r\n if self.simulation_viz is SimulationViz.FRONT:\r\n self.report_progress()",
"def step(self, actions):\n if len(actions) != len(self._seats):\n raise error.Error('actions must be same shape as number of seats.')\n\n if self._current_player is None:\n raise error.Error('Round cannot be played without 2 or more players.')\n\n if self._round == 4:\n raise error.Error('Rounds already finished, needs to be reset.')\n\n players = [p for p in self._seats if p.playing_hand]\n if len(players) == 1:\n raise error.Error('Round cannot be played with one player.')\n\n self._last_player = self._current_player\n self._last_actions = actions\n\n if not self._current_player.playedthisround and len([p for p in players if not p.isallin]) >= 1:\n if self._current_player.isallin:\n self._current_player = self._next(players, self._current_player)\n return self._get_current_step_returns(False)\n\n move = self._current_player.player_move(\n self._output_state(self._current_player), actions[self._current_player.player_id])\n\n if move[0] == 'call':\n self._player_bet(self._current_player, self._tocall)\n if self._debug:\n print('Player', self._current_player.player_id, move)\n self._current_player = self._next(players, self._current_player)\n elif move[0] == 'check':\n self._player_bet(self._current_player, self._current_player.currentbet)\n if self._debug:\n print('Player', self._current_player.player_id, move)\n self._current_player = self._next(players, self._current_player)\n elif move[0] == 'raise':\n self._player_bet(self._current_player, move[1]+self._current_player.currentbet)\n if self._debug:\n print('Player', self._current_player.player_id, move)\n for p in players:\n if p != self._current_player:\n p.playedthisround = False\n self._current_player = self._next(players, self._current_player)\n elif move[0] == 'fold':\n self._current_player.playing_hand = False\n folded_player = self._current_player\n if self._debug:\n print('Player', self._current_player.player_id, move)\n self._current_player = self._next(players, self._current_player)\n players.remove(folded_player)\n self._folded_players.append(folded_player)\n # break if a single player left\n if len(players) == 1:\n self._resolve(players)\n if all([player.playedthisround for player in players]):\n self._resolve(players)\n\n terminal = False\n if all([player.isallin for player in players]):\n while self._round < 4:\n self._deal_next_round()\n self._round += 1\n if self._round == 4 or len(players) == 1:\n terminal = True\n self._resolve_round(players)\n return self._get_current_step_returns(terminal)",
"def run_iteration(self):\n # Choose the player's starting hands and remove from deck.\n starting_hands_for_players = self.select_hands_for_players()\n self._reset_deck(starting_hands_for_players)\n\n # Finish the board\n iteration_board_cards = self.board_cards[:]\n while len(iteration_board_cards) < 5:\n iteration_board_cards.append(self.current_deck.pop())\n\n index_to_best_hands = self._get_best_hands_for_each_player(\n starting_hands_for_players, iteration_board_cards)\n winning_indices = self._get_winning_indices(index_to_best_hands)\n\n # Now update the statistics.\n for idx in winning_indices:\n self.win_stats[idx] += 1.0 / len(winning_indices)\n for idx, best_hand in index_to_best_hands.iteritems():\n if idx in winning_indices:\n if len(winning_indices) > 1:\n self.player_stats[idx].increment_rank(\n best_hand.hand_rank, TIE_RESULT)\n else:\n self.player_stats[idx].increment_rank(\n best_hand.hand_rank, WIN_RESULT)\n else:\n self.player_stats[idx].increment_rank(\n best_hand.hand_rank, LOSS_RESULT)",
"def update_game(game, episode, buttons,run_status):\n\n game_round = game.round\n if game_round == 'newround':\n newround(game)\n print(game_round)\n game.round = 'preflop'\n print(game.round)\n\n\n return 'go'\n elif game_round == 'preflop':\n check = preflop(game, episode, buttons)\n if check == True:\n game.round = 'flop'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n print(game.round)\n return 'go'\n elif check == 'no input':\n game.round = 'preflop'\n return 'stop'\n return 'stop'\n elif game_round == 'flop':\n check = flop(game, episode, buttons, run_status)\n if check == True:\n game.round = 'turn'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n return 'go'\n elif check == 'no input':\n game.round = 'flop'\n return 'stop'\n return 'stop'\n pass\n elif game_round == 'turn':\n check = turn(game, episode, buttons, run_status)\n if check == True:\n game.round = 'river'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n return 'go'\n elif check == 'no input':\n game.round = 'turn'\n return 'stop'\n return 'stop'\n elif game_round == 'river':\n check = turn(game, episode, buttons, run_status)\n if check == True:\n game.round = 'showdown'\n return 'go'\n elif check == False:\n game.round = 'showdown'\n return 'go'\n elif check == 'no input':\n game.round = 'river'\n return 'stop'\n return 'stop'\n elif game_round == 'showdown':\n showdown(game, episode)\n #game.player1.wager = 100\n #game.player2.wager = 50\n # game.update_tablepot()\n game.round = 'newround'\n return 'go'\n pass",
"def play_game(self):\n self.welcome()\n while (self.winner is None) and (not self.exit_flag) and (not self.board.full()):\n self.play_round()\n self.exit_game()"
]
| [
"0.6789043",
"0.67241484",
"0.640139",
"0.63856524",
"0.63816065",
"0.6379626",
"0.63466334",
"0.6255803",
"0.6233843",
"0.62056315",
"0.6128132",
"0.6107873",
"0.61034423",
"0.60672474",
"0.6036295",
"0.6007066",
"0.5978857",
"0.59656787",
"0.5963043",
"0.59627223",
"0.59405243",
"0.59346175",
"0.59306043",
"0.58635455",
"0.58502686",
"0.5786175",
"0.5775684",
"0.5766055",
"0.57532007",
"0.57502645"
]
| 0.6738714 | 1 |
This function is called from the play_game function; it removes the player who was 'stuck with the potato' at the end of each round. It ensures that the head and tail references remain intact and decrements the list size accordingly | def remove_player(lst,player):
print("Removing",player)
cursor=lst.head
while cursor.data!=player:
cursor=cursor.next
if cursor==lst.head:
cursor.next.prev=lst.tail
cursor.prev.next=cursor.next
lst.head=cursor.next
if cursor==lst.tail:
cursor.next.prev=cursor.prev
cursor.prev.next=lst.head
lst.tail=cursor.prev
cursor.prev.next=cursor.next
cursor.next.prev=cursor.prev
lst.size-=1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)",
"def cull(self) -> None:\n for player in self.players:\n to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n for creature in to_remove:\n player.battle_line.remove(creature)\n to_remove.destroyed(self, creature)",
"def remove_player_from_game(self, player):\n if player in self.players:\n cards = player.cards\n for card in cards:\n self.cards.append(card)\n\n self.__shuffle_cards()\n player.cards = []\n if player == self.current_player:\n self.choose_next_player()\n\n self.players.remove(player)",
"def removePlayer(self, index):\n\n self.eloList.pop(index)\n self.idList.pop(index)",
"def removePlayer(self, player):\n\t\tfor i in range(len(self.playerList)):\n\t\t\tif self.playerList [i] == player:\n\t\t\t\tself.playerList[i] = None\n\t\t\t\treturn",
"def take_remove_tile_turn(self, remove_tile_fxn):\n tilesAroundOpponents = []\n for player in self.board.players:\n if not player == self.player:\n x, y = player.x, player.y\n nearbyTiles = self.board.get_removable_tiles_around(x, y)\n tilesAroundOpponents.extend(nearbyTiles)\n tilesAroundOpponents = set(tilesAroundOpponents)\n x, y = self.player.x, self.player.y\n tilesAroundMe = set(self.board.get_removable_tiles_around(x, y)) # tiles around controlled player (me)\n safelyAroundOpponents = list(tilesAroundOpponents - tilesAroundMe) # tiles around opponents but not around me\n removableTiles = set(self.board.get_all_open_removable_tiles()) # all removable tiles\n safelyRemovable = list(removableTiles - tilesAroundMe) # all removable tiles except those around me\n try:\n if safelyAroundOpponents:\n target = random.choice(safelyAroundOpponents)\n elif tilesAroundOpponents: # likely that I'm next to other player. I'll have to remove a tile available for both of us\n target = random.choice(list(tilesAroundOpponents))\n else: # no open spots to remove around players can only happen if solid unremovable tiles exist\n target = random.choice(safelyRemovable)\n except IndexError: # this error will catch if last else statement possibly triggered it\n super(TileRemoveBot, self).take_remove_tile_turn(remove_tile_fxn)\n return\n remove_tile_fxn(target.x, target.y)",
"def play_game(lst):\n number_of_rounds=lst.size-1\n for round in range(1,number_of_rounds+1):\n number_of_passes=random.randint(-(2*lst.size),(2*lst.size))\n remove_player(lst, perform_round(lst,number_of_passes))\n cursor=lst.head\n print(cursor.data,\"is the winner!\")",
"def delPlayer(self, idx):\n self.players[idx:idx + 1] = []",
"def cleanup_deadstuff(self):\r\n for bullet in self.bullets:\r\n if not bullet.alive:\r\n self.bullets.remove(bullet)\r\n\r\n for big_rock in self.rocks:\r\n if not big_rock.alive:\r\n self.rocks.remove(big_rock)",
"def deal(self):\n\t\tplayerList = self.getPlayers()\n\t\tstart = self.curDealerSeatNo + 1\n\t\tfor i in range(len(playerList)*2):\n\t\t\tplayerList[(start + i) % len(playerList)].hand.append(self.deck.pop())\n\t\t\tplayerList[(start + i) % len(playerList)].isHandLive = True",
"def remove_player(self, player):\r\n print(\"REMOVING\", player)\r\n player_index = self.get_player_index(player)\r\n\r\n # if we are the current player, move back the index once\r\n if self.current_player == player_index:\r\n self.current_player -= 1\r\n if self.current_player < 0:\r\n self.current_player = len(self.player_list) - 2\r\n\r\n self.player_positions.pop(player_index)\r\n self.player_list.pop(player_index)\r\n\r\n # TODO: put any cards owned by the player back in to the cards list\r",
"def removePlayer(self, index):\n serial = self.seats[index]\n self.seats[index]=0\n if serial in self.players:\n del self.players[serial]",
"def clearList(self):\r\n self.players.clear()",
"def resetPlayerList(self):\n self.playerList = []",
"def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]",
"def remove_from_hand(self):\n pass",
"def remove_from_winning(game: List[int]) -> None:\n while True:\n game_copy = game.copy()\n row = randint(1, len(game_copy))\n if game_copy[row-1] < 1:\n continue\n matches = randint(1, game_copy[row-1])\n remove_matches(game_copy, row - 1, matches)\n if not is_winning(game_copy):\n remove_matches(game, row - 1, matches)\n break\n print(\"{} matches on row {} have been removed.\".format(matches, row))",
"def _removeBolt(self):\n for bolt in self._bolts:\n if (bolt.y-BOLT_HEIGHT/2)>GAME_HEIGHT:\n self._bolts.remove(bolt)\n self._key = False\n elif bolt.y + BOLT_HEIGHT < 0:\n self._bolts.remove(bolt)",
"def __remove_player(self, color):\n self.state.remove_player(color)\n self.violators.append(self.players[color])",
"def delete_gkeeper(alist):\n\n res = [player for player in alist if player[2] != ['Por']]\n\n return res",
"def drop(self):\r\n\t\t#print \"drop_list: {0}\".format(\" \".join(self.gb.drop_list))\r\n\t\tresult = []\r\n\t\tall_cards = [self.wang_list, self.tube_list, self.bamb_list, self.word_list, self.wind_list]\r\n\t\tprevious = \"\"\r\n\t\tfor cards in all_cards:\r\n\t\t\tfor i in range(len(cards)):\r\n\t\t\t\t\"\"\" avoid running same card \"\"\"\r\n\t\t\t\tif (cards[i] == previous): continue\r\n\t\t\t\tc = cards.pop(i)\r\n\t\t\t\tprevious = c\r\n\t\t\t\tmini, useful_amount, score = self.count_steps()\r\n\t\t\t\tcards.insert(i, c)\r\n\t\t\t\tresult.append([mini, useful_amount, score, c])\r\n\t\t\t\t#print \"min: {0}, useful_amount: {1}, score: {2}, dcard: {3}\".format(mini, useful_amount, score, c)\r\n\r\n\t\tdcard = self.sorting_by_criteria(result)\r\n\t\t#print \"\\tGeniusAgent drop: {0}\".format(dcard)\r\n\t\tctype = GameBoard.CardType(dcard)\r\n\t\tall_cards[ctype-1].remove(dcard)\r\n\t\tself.card_count -= 1\r\n\t\treturn dcard",
"def remove_ball(ball_list, canvas):\r\n if len(ball_list) > 1:\r\n ball_list[len(ball_list) - 1].delete_ball()\r\n ball_list.pop()",
"def remove_self(self):\n if self.game.rules[\"trapping\"]:\n [neighbor.untrap() for neighbor in self.get_neighbors() if neighbor.trapped and self in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]\n self.game.empty_square(self.position)\n self.position = None",
"def play_normal_round(player):\n player.cards_in_play.appendleft(player.deck.popleft())\n return player",
"def deleteBolts(self):\n a = self.get_bolts()\n for i in self.get_bolts():\n if i.y>GAME_HEIGHT:\n a.remove(i)\n self.set_plyrbolts(0)\n elif i.y<=-BOLT_HEIGHT:\n a.remove(i)",
"def removeIfDead(self):\n global HP, winColor, FPS, kills\n if self.health <= 0:\n if self.rank == \"firerate\":\n if P.boosts == 1:\n P.timer = 600\n else:\n P.boosts += 1\n\n if self.rank == \"healer\":\n if P.medkits == 1:\n HP = 100\n else:\n P.medkits += 1\n\n if self.rank == \"quadshot\":\n P.quadshot = True\n P.quadshottimer = 300\n FPS = 100\n\n if self.rank == \"helper\":\n if self.firsttime:\n self.image = pygame.transform.rotate(self.image, 180)\n self.firsttime = False\n self.y -= self.vel*3\n if self.y <= 0:\n del enemies[findPlace(self, enemies)]\n if yn(Frame, 3):\n projectiles.append(projectile(self.x+self.w+2, self.y+self.h//2, 8, yvel=0, r=True, l=False))\n projectiles.append(projectile(self.x-42, self.y+self.h//2, -8, yvel=0, r=False, l=True))\n else:\n del enemies[findPlace(self, enemies)]\n kills += 1",
"def end_turn(self):\n for _ in range(self._hand.size()):\n card = self._hand.pop()\n self._discard.push(card)\n\n for _ in range(self._active.size()):\n card = self._active.pop()\n self._discard.push(card)\n\n for _ in range(self._handsize):\n if self._deck.size() == 0:\n self._discard.shuffle_collection()\n self._deck.replace(self._discard)\n self._discard.clear_collection()\n card = self._deck.pop()\n self._hand.push(card)\n self._money = 0\n self._attack = 0",
"def clearTroves(self):\n self.primaryTroveList.thaw(\"\")\n self.newTroves.thaw(\"\")\n self.oldTroves.thaw(\"\")",
"def resetPlayerHands(self, players):\n\t\tfor x in players:\n\t\t\tx.hand = []",
"def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)"
]
| [
"0.65123177",
"0.63221455",
"0.63050866",
"0.62616086",
"0.62591946",
"0.6235872",
"0.6151183",
"0.6131671",
"0.605631",
"0.60448986",
"0.6034432",
"0.60006094",
"0.5953268",
"0.5933007",
"0.5919274",
"0.5892594",
"0.58757186",
"0.5870959",
"0.58654535",
"0.5825909",
"0.58150935",
"0.5779333",
"0.5773737",
"0.5771853",
"0.576182",
"0.57563853",
"0.5725429",
"0.57237077",
"0.56827635",
"0.56787497"
]
| 0.726261 | 0 |
Returns all scene assets. | def get_all_assets(self):
return c4d.documents.GetAllAssets(self._document, False, '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def returnAllAssets(self):\n return self.__assets",
"def assets(self):\n return self._assets.values()",
"def getAssets(self):\n return self.assets",
"def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]",
"def children(self) -> \"AssetList\":\n return self._cognite_client.assets.list(parent_ids=[self.id], limit=None)",
"def get_scenes(self):\n scenes = []\n for i in self.scenes:\n scenes.append(self.scenes[i])\n\n return scenes",
"def assets():",
"def assets(self):\n return SpaceAssetsProxy(self._client, self.id)",
"def assets(self):\n # N.B.:\n # we don't cache the results of the {root} exploration so we can always\n # return a result that reflects the current contents of the filesystem\n yield from self._explore()\n # all done\n return",
"def get_all_assets(self):\n\n with preserve_expanded_rows(self.view):\n with preserve_selection(self.view):\n self.clear()\n containers = lib.get_containers()\n items = lib.create_node(containers)\n self.add_items(items)\n\n return len(items) > 0",
"def assets(self):\n static = self.static\n if static is None:\n return None\n\n assets = os.path.join(static, 'assets')\n if not os.path.isdir(assets):\n return None\n\n return assets",
"def assets():\n pass",
"def subtree(self, depth: int = None) -> \"AssetList\":\n return self._cognite_client.assets.retrieve_subtree(id=self.id, depth=depth)",
"def scenelist(self):\n return self.caller.player_ob.db.random_scenelist or []",
"def get_assets(self):\n findstr = r'W\\.iframeInit\\({\"assets\":(\\[.*\\])'\n try:\n page = str(requests.get(self.srcpage).content, 'utf-8')\n asset_search = re.search(findstr, page)\n if asset_search:\n assets = asset_search.group(1)\n try:\n assets = json.loads(assets)\n except ValueError:\n print(\"Error loading JSON string\")\n self.assets = pd.DataFrame(assets)\n return self.assets\n else:\n raise AssetNotFoundError\n except:\n print(\"Failed to get asset information from page.\\nCheck video ID.\")",
"def assets(self):\n if self._assets_cache:\n return self._assets_cache\n asset_defaults = Section.assets.value\n ret_val = dict([(obj.name, obj.value) for obj in asset_defaults])\n config_vals = self._section_as_dict(Section.assets.name)\n ret_val.update(config_vals)\n self._assets_cache = AttrDict(**ret_val)\n return self._assets_cache",
"def get_assets(self):\n # This includes a kludge to get the objectiveBankId directly from\n # this Activity's Objective's private _my_map :o\n from ..repository.objects import AssetList\n if not self.is_asset_based_activity():\n raise IllegalState()\n url_str = (self._base_url + '/objectivebanks/' +\n self.get_objective()._my_map['objectiveBankId'] +\n '/assets/bulk?id=' + '&id='.join(self._my_map['assetIds']))\n return AssetList(self._load_json(url_str))",
"def getAssets(self, start=None, length=20):\n if start == None:\n start = len(self.assets)\n doc = minidom.parse(urllib.urlopen(\"%s/rest/assets/user/%s/%i/%i\" % (serverString, self.name, start, length)))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n for element in doc.getElementsByTagName(\"asset\"):\n self.assets += [Asset()]\n self.assets[-1]._getInfoFromNode(element)",
"def get_objects(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n return objs, objs_attached",
"def get_queryset(self):\n return self.request.user.assets.all()",
"def get_visuals(self):\n return self.scene['visuals']",
"def handle_assets(self):\n return self.assets(asset_type='HANDLE')",
"def get_all_entities(self):\n return Artifact.get_all()",
"def list_game_assets(game_assets=False):\n\n asset_dir_abs_path = os.path.join(API.root_path, 'assets')\n py_files_in_asset_dir = glob.glob(os.path.join(asset_dir_abs_path, '*.py'))\n\n output = [\n os.path.splitext(os.path.basename(f))[0]\n for f\n in py_files_in_asset_dir\n if os.path.basename(f) != '__init__.py'\n ]\n\n if game_assets:\n for collection_name in output:\n collectionObject = get_game_asset(collection_name, return_type=object)\n if not getattr(collectionObject, 'is_game_asset', False):\n output.remove(collection_name)\n\n return sorted(output)",
"def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)",
"def get_all(self):\n return objects.registry.AssemblyList.get_all(self.context)",
"def sporecastAssets(sporecastId, start=0, length=20):\n url = \"%s/rest/assets/sporecast/%s/%i/%i\" % (serverString, sporecastId, start, length)\n doc = minidom.parseString(urllib.urlopen(url).read().decode(\"utf-8\", \"ignore\").encode(\"ascii\", \"xmlcharrefreplace\"))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n assets = []\n for element in doc.getElementsByTagName(\"asset\"):\n assets += [Asset()]\n assets[-1]._getInfoFromNode(element)\n return assets",
"def get_selected_assets(self):\n raise NotImplementedError",
"def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")",
"def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": r.content.decode('utf-8')}"
]
| [
"0.79148006",
"0.7388927",
"0.7363286",
"0.7343612",
"0.70494163",
"0.6815413",
"0.67707574",
"0.6616679",
"0.6502759",
"0.64599687",
"0.6388078",
"0.63788337",
"0.6294711",
"0.629042",
"0.62188584",
"0.6144838",
"0.6102181",
"0.6045124",
"0.5927903",
"0.59136426",
"0.59107137",
"0.5864219",
"0.5852313",
"0.581632",
"0.57719433",
"0.5746419",
"0.57405305",
"0.57263273",
"0.56924945",
"0.5681884"
]
| 0.78163725 | 1 |
Returns a list of take settings for all takes in the scene. | def get_all_take_settings(self):
take_settings = []
take_data = self._document.GetTakeData()
def _traverse(take, depth):
take_settings.append(
C4dTakeSettings(self._main_thread_executor, take, take_data, depth,
self._document))
for child_take in take.GetChildren():
_traverse(child_take, depth + 1)
_traverse(take_data.GetMainTake(), 0)
return take_settings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _list_settings(self, settings=None):\n if settings == None:\n settings = fileIO.load_json(\"settings.json\")\n print(\"The list of settings is: \")\n for i in settings:\n print(\"{0}: {1}\".format(i, settings[i]))\n return(None)",
"def get_all_settings(profile, store=\"local\"):\n ret = dict()\n ret.update(get_settings(profile=profile, section=\"state\", store=store))\n ret.update(get_settings(profile=profile, section=\"firewallpolicy\", store=store))\n ret.update(get_settings(profile=profile, section=\"settings\", store=store))\n ret.update(get_settings(profile=profile, section=\"logging\", store=store))\n return ret",
"def get_settings():\n return SettingCollection.build()",
"def get_settings_from_config(self):\n return [self.wiki.config.parts_enabled,\n self.wiki.config.pages_per_filepart_history,\n self.wiki.config.revs_per_filepart_history,\n self.wiki.config.numparts_for_abstract,\n self.wiki.config.numparts_for_pagelogs,\n self.wiki.config.pages_per_filepart_abstract,\n self.wiki.config.recombine_metacurrent,\n self.wiki.config.recombine_history,\n self.wiki.config.checkpoint_time]",
"def scenelist(self):\n return self.caller.player_ob.db.random_scenelist or []",
"def prms(widget: QWidget) -> List:\n parameters = BaseTrain.prms(widget)\n return parameters",
"def get_scenes(self):\n scenes = []\n for i in self.scenes:\n scenes.append(self.scenes[i])\n\n return scenes",
"def items(self):\n return ((name, self.__getitem__(name)) for name in self._visible_setting_names_gen)",
"def listAllSettingNames(self):\n\t\treturn sorted(self.settings.iterkeys())",
"def get_visuals(self):\n return self.scene['visuals']",
"def get_settings(self):\n return (self._frequency, self._duration)",
"def get_settings(self):\n return {\n \"game_name\": self.game_name,\n \"n_epochs\": self.n_epochs,\n \"n_episodes\": self.n_episodes,\n \"n_frames\": self.n_frames,\n \"agent\": self.agent.get_settings(),\n \"results_dir\": self.results_dir,\n \"use_minimal_action_set\": self.use_minimal_action_set,\n }",
"def get_trials(self):\n return self._trials",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def all_settings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"all_settings\")",
"def allPresets():\n\n return [__cleanPresetTreeName(k) for k in __preset_lookup.iterkeys()]",
"def settings(self):\n return [\n self.input_type,\n self.input_object_name,\n self.image_name,\n self.objects_name,\n self.object_extension,\n self.file_name_method,\n self.file_image_name,\n self.wants_file_name_suffix,\n self.file_name_suffix,\n self.file_format,\n self.pathname,\n self.bit_depth,\n self.overwrite,\n self.when_to_save,\n self.update_file_names,\n self.create_subdirectories,\n self.root_dir,\n ]",
"def trial_setup(params):\n runs = []\n trials = []\n for run in range(params['runs']):\n runs = runs + [run]*params['trials_per_run']\n for trial in range(params['trials_per_run']):\n trials.append(trial)\n return(runs,trials)",
"def getSettings(self):\n return self.cfg",
"def get_all(self):\n logging.info(__name__ + ' : reading all settings from instrument')\n self.level.get()\n self.status.get()\n self.rate.get()",
"def get_all_profiles(store=\"local\"):\n return {\n \"Domain Profile\": get_all_settings(profile=\"domain\", store=store),\n \"Private Profile\": get_all_settings(profile=\"private\", store=store),\n \"Public Profile\": get_all_settings(profile=\"public\", store=store),\n }",
"def script(self):\n return list(\n itertools.chain.from_iterable(story.script for story in self.stories)\n )",
"def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()",
"def get_skill_settings(self):\n return self.request({\n \"method\": \"GET\",\n \"path\": \"/\" + UUID + \"/skill/settings\",\n })",
"def options() -> List:\n return list(c.value for c in Plugin)",
"def readSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.param.vol[i-1] = self.settings.value(vol,type=int)\n info = f\"info{i}\"\n self.param.info[i-1] = self.settings.value(info,type=str)\n ip = f\"ip{i}\"\n self.param.ip[i-1] = self.settings.value(ip,type=str)\n muted = f\"muted{i}\"\n self.param.muted[i-1] = self.settings.value(muted,type=bool)",
"def get_step_settings_at_index(self, index):\n return self.routine_template.get_step_settings_at_index(index)",
"def getAllTriStimulus(self):\n return self.tristimulus",
"def get_settings(self):\n return self.settings"
]
| [
"0.57145137",
"0.56829137",
"0.5681458",
"0.56472075",
"0.564445",
"0.56234556",
"0.55627966",
"0.54923457",
"0.54868585",
"0.54854923",
"0.54794174",
"0.54638684",
"0.54450613",
"0.5429522",
"0.5429522",
"0.53882307",
"0.5235746",
"0.5195114",
"0.51877916",
"0.51831794",
"0.51723313",
"0.51710427",
"0.5167869",
"0.51282716",
"0.5115123",
"0.5102978",
"0.5084432",
"0.50699186",
"0.50685596",
"0.50527656"
]
| 0.80501604 | 0 |
Returns the name of the scene. | def get_scene_name(self):
return self._document.GetDocumentName() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name(self):\n return self._scene_name",
"def scene_name():\n\n pass",
"def get_current_scene_name():\n\n scene_name = cmds.file(query=True, sceneName=True, shortName=True)\n scene_name = osp.splitext(scene_name)[0]\n\n return scene_name",
"def bspb_sceneName():\n projectName = bs_pathGenerator.bs_getEnvDetails()['projectName'].lower()\n # get shot details.\n epi, seq, shot, stage = bs_pathGenerator.bs_shotDetailsCheckAndReturn()\n return '{0}_{1}_{2}_{3}_{4}'.format(projectName, epi, seq, shot, stage)",
"def name(self):\n return self.mesh.name",
"def name(self):\n return self.mesh.name",
"def currentBaseSceneName(self):\n logger.debug(\"Func: currentBaseSceneName/getter\")\n\n return self._currentBaseSceneName",
"def name(self) -> str:\n return self._root.name",
"def name(self):\n\t\treturn self.asset.name",
"def updateSceneName(*args):\n pi.openSceneFullPath = cmds.file(q=True, sn=True)\n pi.openScene = os.path.basename(pi.openSceneFullPath)\n\n if pi.openScene == \"\":\n pi.openScene = \"UNSAVED SCENE!\"\n cmds.text(widgets[\"sceneText\"], e=True, l=pi.openScene)",
"def name(self) -> str:\n return self.doc.get('name', self.identifier())",
"def get_name(self):\n return self._assets[0].get_name()",
"def get_scene(videoname_):\n s = videoname_.split(\"_S_\")[-1]\n s = s.split(\"_\")[0]\n return s[:4]",
"def name(self):\n return self._light.name",
"def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")",
"def name(self) -> str:\n return pulumi.get(self, \"name\")"
]
| [
"0.91623044",
"0.859132",
"0.8028259",
"0.76417583",
"0.72261703",
"0.72261703",
"0.71367395",
"0.6990485",
"0.68125004",
"0.6693551",
"0.66564184",
"0.6653075",
"0.6639346",
"0.66376406",
"0.66322047",
"0.6629142",
"0.6629142",
"0.6629142",
"0.6629142",
"0.6629142",
"0.6629142",
"0.66264457",
"0.66264457",
"0.66264457",
"0.66264457",
"0.66264457",
"0.66264457",
"0.66264457",
"0.66264457",
"0.66264457"
]
| 0.8868723 | 1 |
Returns the name of the scene without its extension. | def get_scene_name_without_extension(self):
return re.sub(r'\.c4d$', '', self.get_scene_name()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_scene_name(self):\n return self._document.GetDocumentName()",
"def name(self):\n return self._scene_name",
"def get_current_scene_name():\n\n scene_name = cmds.file(query=True, sceneName=True, shortName=True)\n scene_name = osp.splitext(scene_name)[0]\n\n return scene_name",
"def scene_name():\n\n pass",
"def name(self):\n\t\tnam = super( textureFile, self ).name\n\t\tif self.hasUdim:\n\t\t\ttry:\n\t\t\t\treturn nam[:nam.rindex( '.' )]\n\t\t\texcept:\n\t\t\t\treturn nam\n\t\treturn nam",
"def name(self):\n #type: ()->Text\n return (\n os.path.splitext(os.path.basename(self.fileName))[0])",
"def bspb_sceneName():\n projectName = bs_pathGenerator.bs_getEnvDetails()['projectName'].lower()\n # get shot details.\n epi, seq, shot, stage = bs_pathGenerator.bs_shotDetailsCheckAndReturn()\n return '{0}_{1}_{2}_{3}_{4}'.format(projectName, epi, seq, shot, stage)",
"def name_sans_ext(self) -> str:\n return os.path.splitext(self.path)[0]",
"def get_scene(videoname_):\n s = videoname_.split(\"_S_\")[-1]\n s = s.split(\"_\")[0]\n return s[:4]",
"def get_scene(videoname):\n s = videoname.split(\"_S_\")[-1]\n s = s.split(\"_\")[0]\n return s[:4]",
"def world_name(self) -> str:\n return os.path.basename(self.path)",
"def name(self):\n return self.path.stem",
"def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]",
"def name(self):\n if hasattr(self, \"module\"):\n return self.module.__name__.replace('_', '-')\n return None",
"def name(self) -> str:\n return self._root.name",
"def currentBaseSceneName(self):\n logger.debug(\"Func: currentBaseSceneName/getter\")\n\n return self._currentBaseSceneName",
"def get_release_name(self) -> str:\n if self.season is not None and self.episode is None:\n return os.path.basename(os.path.dirname(self.file))\n return os.path.splitext(os.path.basename(self.file))[0]",
"def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path",
"def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname",
"def get_name(self):\n return self._assets[0].get_name()",
"def get_name(self):\n return None",
"def just_the_name(path):\n name = os.path.splitext(os.path.basename(path))[0]\n return name",
"def get_name(self):\n return self.id.split('.')[-1:][0]",
"def name(self):\n\t\treturn self.asset.name",
"def name(self):\n return self.mesh.name",
"def name(self):\n return self.mesh.name",
"def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()",
"def name(self):\n return self._path or '__main__'",
"def just_the_name(path):\n return os.path.splitext(os.path.basename(path))[0]",
"def name() -> str:\n pass"
]
| [
"0.77446645",
"0.76176494",
"0.75056267",
"0.7391316",
"0.6971184",
"0.6722483",
"0.6717563",
"0.6558429",
"0.64907384",
"0.64628965",
"0.6381981",
"0.6360798",
"0.6342684",
"0.63157916",
"0.6294775",
"0.6282843",
"0.6249788",
"0.62023914",
"0.62014425",
"0.6194817",
"0.6188842",
"0.61742705",
"0.6163026",
"0.6138539",
"0.61337966",
"0.61337966",
"0.61282814",
"0.61265033",
"0.6124427",
"0.6118069"
]
| 0.8073612 | 0 |
Returns the path of the scene. | def get_scene_path(self):
return self._maybe_fix_windows_path(self._document.GetDocumentPath()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def currentScenePath(self):\n logger.debug(\"Func: currentBaseScenePath/getter\")\n\n return os.path.join(self.projectDir, self._currentSceneInfo[\"Versions\"][self.currentVersionIndex-1][\"RelativePath\"])",
"def currentBaseScenePath(self):\n logger.debug(\"Func: currentBaseScenePath/getter\")\n\n return os.path.join(self.projectDir, self._currentSceneInfo[\"Path\"])",
"def path(self) :\n return self.m_path",
"def getPath(self):\n return self.path",
"def path(self) -> str:\n return pulumi.get(self, \"path\")",
"def path(self) -> str:\n return pulumi.get(self, \"path\")",
"def path( self ) :\n\n return( self.__path )",
"def getPath(self):\n return self._path",
"def get_path(self):\n return self.path",
"def currentPreviewPath(self):\n logger.debug(\"Func: currentPreviewPath/getter\")\n if self._currentSceneInfo[\"SubProject\"] is not \"None\":\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"Name\"])\n else:\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"SubProject\"], self._currentSceneInfo[\"Name\"])\n return path\n # if os.path.isdir(path):\n # return path\n # else:\n # return \"\"",
"def path(self):\n # type: () -> string_types\n return self._path",
"def path(self) -> str:\n return self._path",
"def path(self) -> str:\n return self._path",
"def path(self) -> str:\n return self._path",
"def path(self) -> str:\n return self._path",
"def getPath(self):\r\n\t\treturn self.pathToGoal",
"def get_scene_folder():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.get_scene_folder\")\n\n file_name = mc.file(q=True, sceneName=True)\n\n head, tail = os.path.split(file_name)\n\n flg.info(\"Scene fileName: {}\".format(tail))\n flg.info(\"Scene directory: {}\".format(head))\n\n return head",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self):\n return self._path",
"def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")"
]
| [
"0.7808846",
"0.7336566",
"0.70863146",
"0.70296896",
"0.7013405",
"0.7013405",
"0.6983869",
"0.69631296",
"0.6961136",
"0.6953609",
"0.6948439",
"0.6935067",
"0.6935067",
"0.6935067",
"0.6935067",
"0.6891935",
"0.6875375",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6852452",
"0.6849937"
]
| 0.8036171 | 0 |
Checks if the provided document is the same as the one with which this instance was initialized. | def has_the_same_document(self, document):
try:
return document == self._document and document.GetDocumentPath() == \
self._document.GetDocumentPath()
except ReferenceError:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n if not isinstance(other, Document):\n return False\n\n return self.__dict__ == other.__dict__",
"def validateDocument(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlValidateDocument(self._o, doc__o)\n return ret",
"def document(self, document):\n if document is not None and not re.search(r'^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$', document): # noqa: E501\n raise ValueError(r\"Invalid value for `document`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$/`\") # noqa: E501\n\n self._document = document",
"def check_document(self, document):\n if isinstance(document, dict):\n document = json.dumps(document)\n r = requests.post('/'.join([self.base_url,\n 'check']),\n data=document,\n headers={'Content-Type': 'application/json'})\n return r.json()",
"def document_is_valid(self, document_is_valid):\n\n self._document_is_valid = document_is_valid",
"def __eq__(self, other):\n if not isinstance(other, DocumentPosition):\n return False\n\n return self.to_dict() == other.to_dict()",
"def validate(self, document):\n self.validator.validate(document)",
"def document_exists(self, docid):\n raise NotImplementedError",
"def _document_exists(self, document_name):\n return len(self.ssm_client.list_document_versions(Name=document_name)['DocumentVersions']) >= 1",
"def __ne__(self, other):\n if not isinstance(other, DocumentPosition):\n return True\n\n return self.to_dict() != other.to_dict()",
"async def document_valid(self, document):\n return web.json_response()",
"def __eq__(self, other):\n if not isinstance(other, DocumentFormat):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n if not isinstance(other, DropDocumentReq):\n return False\n\n return self.__dict__ == other.__dict__",
"async def is_valid(self):\n\n # TODO: check if we are inside transaction and raise exception\n try:\n log.debug(\"Perform dirty document check\")\n await self._per_currency_balance_is_valid()\n await self._per_account_balance_is_valid(include_dirty=True)\n\n log.debug(\"Perform clean document check\")\n await self._per_account_balance_is_valid(include_dirty=False)\n except InvalidDocumentException as e:\n log.error(\"Document is not valid: %s\", e.args[0])\n return False\n log.debug(\"Document is valid\")\n return True",
"def __ne__(self, other):\n if not isinstance(other, DocumentFormat):\n return True\n\n return self.to_dict() != other.to_dict()",
"def validateDocumentFinal(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlValidateDocumentFinal(self._o, doc__o)\n return ret",
"def check_doc_unchanged(original, new, doc_name):\n changes = get_doc_changes(original, new)\n\n if changes:\n raise ValueError(\n '{} differs from stored ({})'.format(\n doc_name,\n ', '.join(['{}: {!r}!={!r}'.format('.'.join(offset), v1, v2) for offset, v1, v2 in changes])\n )\n )",
"def validate(self, document) -> None:\n if not len(document.text) > 0:\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )",
"def __eq__(self, other):\n if not isinstance(other, AutodetectDocumentValidationResult):\n return False\n\n return self.__dict__ == other.__dict__",
"def validateDocument(self, ctxt):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)\n return ret",
"def IsDocumentRelated(self, *args, **kwargs):\n pass",
"def _update_documents(self):\n if self.campaigns.active():\n top_ranked_documents = utils._rank_documents(self.event)\n # [TODO] this is a kludge\n if set(top_ranked_documents) == set(self.documents.all()):\n return False\n print('[DEBUG] Docs changed! New campaign ahoy!')\n self.documents = top_ranked_documents\n return True",
"def __eq__(self, other):\n if not isinstance(other, ChosenDocumentType):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n\n if not self or not other: #either one of them is null\n return False\n\n if len(self.documents) != len(other.documents):\n return False\n\n for i in xrange(0, len(self.documents)):\n if self.documents[i].index != other.documents[i].index:\n return False\n\n return True",
"def assertValid(self, doc):\n return self.schema.assertValid(doc)",
"def __eq__(self, other):\n if not isinstance(other, DocumentConfig):\n return False\n\n return self.__dict__ == other.__dict__",
"def reset_document(self):\n # FIXME: this state does not make sense\n self.doc_version_set = False\n self.doc_comment_set = False\n self.doc_namespace_set = False\n self.doc_data_lics_set = False\n self.doc_name_set = False\n self.doc_spdx_id_set = False",
"def __ne__(self, other):\n if not isinstance(other, ChosenDocumentType):\n return True\n\n return self.to_dict() != other.to_dict()",
"def check_doc(document):\n try:\n assert document[0].startswith(\"#doc \")\n except AssertionError:\n print(\"The document does not start with '#doc' but instead\", document[0])\n doc_id = document[0].split()[1]\n for i in range(1, len(document)):\n line = document[i]\n try:\n assert int(line.split()[0]) == i - 1\n except (ValueError, AssertionError):\n print(\"Document\", doc_id, \"line\", i, \":\", line, \"expect line index\",\n i - 1, \", found\", line.split()[0])",
"def checkConsistency(self):\n return _libsbml.SBMLDocument_checkConsistency(self)"
]
| [
"0.6909281",
"0.66066235",
"0.65212065",
"0.64504385",
"0.6407911",
"0.63363045",
"0.6308045",
"0.6298419",
"0.6292361",
"0.6220213",
"0.6178518",
"0.6174105",
"0.6146417",
"0.61002827",
"0.6076059",
"0.60620683",
"0.6045985",
"0.6036641",
"0.6031452",
"0.6000527",
"0.599361",
"0.5992917",
"0.5977019",
"0.5963089",
"0.5960342",
"0.59549737",
"0.5950524",
"0.594285",
"0.5939874",
"0.5905051"
]
| 0.8196296 | 0 |
Checks if the scene is saved. | def is_saved(self):
return self.get_scene_path() != '' and not self._document.GetChanged() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def IsSaved(self):\n\t\treturn self.acad.ActiveDocument.Saved",
"def check_is_saved(self):\n raise NotImplementedError()",
"def _check_scene_open(self):\n return self._engine.current_file_path() is not None",
"def is_saved(self):\n return self._slicerIsSaved",
"def has_saved(self, event):\n return event in self.saved_events",
"def is_saved(self):\n return self.session.is_saved",
"def is_saved(self):\n last_path = self.__key._Key__reference.path().element_list()[-1]\n return ((last_path.has_name() ^ last_path.has_id()) and\n self.__key.has_id_or_name())",
"def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File Updated\"\n except:\n pass",
"def on_saved(self):\n return self._saved_position == self._current_undo_command",
"def save_drawing_if_necessary(self):\n\n app_doc_data = AppDocData.instance()\n if app_doc_data.activeDrawing and app_doc_data.activeDrawing.modified:\n #if QMessageBox.Yes == QMessageBox.question(self, self.tr(\"Question\"),\n # self.tr(\"Do you want to save drawing?\"),\n # QMessageBox.Yes | QMessageBox.No):\n # self.actionSaveCliked()\n # return True\n if QMessageBox.Ignore == QMessageBox.question(self, self.tr('Continue?'),\n self.tr('Changes may not have been saved.'),\n QMessageBox.Ignore | QMessageBox.Cancel):\n return False\n return True",
"def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)",
"def scene_is_modified():\n\n pass",
"def checkChanged(self):\n if len(self.undoStack):\n self.msg = messagebox.askyesnocancel('Save Data?',\n 'Game is not saved. Save it?')\n if self.msg == None:\n return 'cancel'\n elif self.msg == 'yes':\n self.save()\n return 'yes'\n else:\n return 'no'",
"def has_save_strategy(self):\r\n return self.save_strategy is not None",
"def isSceneModified(self):\n logger.debug(\"Func: isSceneModified\")\n return nuke.modified()",
"def exit_check(self):\n if self.changed:\n msg = \"The current object has not been saved - would you like to exit?\"\n reply = QMessageBox.question(self.parent, 'Message', msg, QMessageBox.Yes, QMessageBox.No)\n return reply == QMessageBox.Yes\n else:\n return True",
"def should_save_snapshot_done(self, should_save: bool) -> None:\n if should_save:\n self._phase = APIPhase.BEFORE_SAVE_SNAPSHOT",
"def saveEditor(self, fn):\n for editor in self.editors:\n if Utilities.samepath(fn, editor.getFileName()):\n break\n else:\n return True\n \n if not editor.isModified():\n return True\n else:\n ok = editor.saveFile()\n return ok",
"def saved(self, sid):\n\t\tprint self.get_fpath(sid)\n\t\treturn os.path.exists(self.get_fpath(sid))",
"def save(self, fname = None):\n return True",
"def checkScene ( doc_id ):\n if cmds.objExists ( \"root\" ) :\n \n self.labelStatus.setText ( \"You shouldn't have any named 'root' node in your scene\" )\n return False \n \n return True",
"def should_save(self):\n return self.modified",
"def Save(self, window):\n\n if not self._doSave:\n return False\n\n if not self.Find(window):\n return False\n\n name = window.GetName()\n self._persistentObjects[name].Save()\n\n return True",
"def save_epoch(self):\n if self._best_epoch:\n # save if we have the best performance\n return True\n # otherwise save only every save iter\n return (self.epoch % self._save_iter) == 0",
"def should_save_final_snapshot_done(self, should_save: bool) -> None:\n if should_save:\n self._phase = APIPhase.BEFORE_SAVE_FINAL_SNAPSHOT\n else:\n self._phase = APIPhase.BEFORE_REUSE_INSTANCE",
"def __is_new_save(self):\n last_save = self.__get_last_save()\n new_save = self.__create_save()\n for signal in new_save:\n if signal in last_save:\n for attribut in new_save[signal]:\n if attribut in last_save[signal]:\n if new_save[signal][attribut] == last_save[signal][attribut]:\n return False\n else:\n return True\n else:\n return True\n else:\n return True",
"def unsaved_details_exist(self):\r\n return (self.talkDetailsWidget.saveButton.isEnabled() and\r\n (self.talkDetailsWidget.titleLineEdit.text() or\r\n self.talkDetailsWidget.presenterLineEdit.text() or\r\n self.talkDetailsWidget.categoryLineEdit.text() or\r\n self.talkDetailsWidget.descriptionTextEdit.toPlainText()))",
"def Persist(self) -> bool:",
"def Persist(self) -> bool:",
"def begin_saving(self):\n self.saving.value = True"
]
| [
"0.73493844",
"0.7254954",
"0.7156946",
"0.695256",
"0.6739197",
"0.6644546",
"0.6619367",
"0.6561541",
"0.655039",
"0.6498395",
"0.6490378",
"0.646278",
"0.64500505",
"0.631071",
"0.63061804",
"0.6293041",
"0.62691706",
"0.625628",
"0.62528276",
"0.61826736",
"0.6141712",
"0.61072",
"0.6075846",
"0.59655386",
"0.5950364",
"0.59392726",
"0.5938981",
"0.5927689",
"0.5927689",
"0.5917408"
]
| 0.86973995 | 0 |
\Theta(n^3) method to calculate the expected cost of the optimal BST. | def get_optimal_bst(p, q):
assert p is not None
assert q is not None
assert len(p) == len(q)
n = len(p) - 1
assert n >= 0
if n == 0:
return 1.0, []
e = [[-1 for _ in range(0, n + 1)] for _ in range(0, n + 1)]
root = [[-1 for _ in range(0, n)] for _ in range(0, n)]
for i in range(1, n + 1):
e[i][i] = (q[i - 1] + q[i]) * 2 + p[i]
root[i - 1][i - 1] = i
for i in range(n + 1, 0, -1):
for j in range(i + 1, n + 1):
w = sum(q[i - 1:j + 1]) + sum(p[i:j + 1])
min_cost = sys.maxsize
current_root = -1
for r in range(i, j + 1):
assert (r == i or e[i][r - 1] > 0) and (r == j or e[r + 1][j] > 0)
e_left = q[i - 1] if r == i else e[i][r - 1]
e_right = q[j] if r == j else e[r + 1][j]
e_cost = w + e_left + e_right
if e_cost < min_cost:
min_cost = e_cost
current_root = r
e[i][j] = min_cost
root[i - 1][j - 1] = current_root
return e[1][n], root | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]",
"def get_optimal_bst_fast(p, q):\n\n assert p is not None\n assert q is not None\n assert len(p) == len(q)\n n = len(p) - 1\n assert n >= 0\n\n e = [[-1 for _ in range(0, n + 1)] for _ in range(0, n + 2)]\n w = [[-1 for _ in range(0, n + 1)] for _ in range(0, n + 2)]\n root = [[-1 for _ in range(0, n)] for _ in range(0, n)]\n\n for i in range(1, n + 1):\n root[i - 1][i - 1] = i\n\n for i in range(1, n + 2):\n e[i][i - 1] = q[i - 1]\n w[i][i - 1] = q[i - 1]\n\n # Calculate w matrix first.\n for i in range(n + 1, 0, -1):\n for j in range(i, n + 1):\n w[i][j] = w[i][j - 1] + p[j] + q[j]\n\n for i in range(1, n + 1):\n e[i][i] = e[i][i - 1] + e[i + 1][i] + w[i][i]\n\n for j_i_diff in range(1, n):\n i = 1\n r = root[0][j_i_diff - 1]\n while i + j_i_diff <= n:\n j = i + j_i_diff\n e_cost = e[i][r - 1] + e[r + 1][j] + w[i][j]\n if e[i][j] < 0 or e_cost < e[i][j]:\n e[i][j] = e_cost\n root[i - 1][j - 1] = r\n if r == root[i][j - 1]:\n i += 1\n else:\n r += 1\n\n return e[1][n], root",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #ucs uses a priority queue\n frontier.push(initialNode, initialNode.pathCost)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost) #we only check if state is in explored because update does the other\n return []\n util.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n\n templist=[]\n explored = set()\n fringe = util.PriorityQueue()\n # state, list of directions till now and the cost is pushed in the stack\n # so that algorithm can explore the node with lowest cost first\n fringe.push((problem.getStartState(),templist),1)\n\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n\n if problem.isGoalState(currentNode):\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # total cost is cost till now plus cost to the child node\n totalCost = childNode[2]+problem.getCostOfActions(currDir)\n fringe.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n\n\n return pathToGoal;",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def get_expected_cost(self):",
"def cost(self):\n node, path_back = self, []\n cost = 0\n while node:\n path_back.append(node)\n if node.action is not None:\n cost = cost + node.action.cost\n node = node.parent\n # remove one due to root empty node \n #cost = cost-1\n return [cost, list(reversed(path_back))]",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def fastest_path_estimation(sol):\n\n class Path:\n def __init__(self, places, graph):\n self.g = 0 # current cost\n self.graph = graph\n self.visited = [places[0]] # list of already visited attractions\n self.not_visited = copy.deepcopy(places[1:]) # list of attractions not yet visited\n\n def __lt__(self, other):\n return self.g < other.g\n\n def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)\n\n def add_to_heap_queue(path):\n # custom function to add to heap queue sorted by the solution's cost\n heappush(h_queue, path)\n\n if len(sol.not_visited) == 0:\n return 0\n elif len(sol.not_visited) == 1:\n return sol.graph[sol.visited[-1], sol.not_visited[0]]\n\n c = sol.visited[-1]\n pm = sol.not_visited[-1]\n # the heap queue of solution sorted by their cost - change all to tuples with g for dijkstra\n h_queue = []\n\n # the places to use for the graph\n sub_search_places = [c]\n sub_search_places.extend(sol.not_visited)\n\n # push the first \"node\" in the queue\n add_to_heap_queue(Path(sub_search_places, sol.graph))\n while True:\n # take the next solution with the shortest cost\n path = heappop(h_queue)\n # if it contains destination, stop and return that solution\n if pm in path.visited:\n return path.g\n # create a new solution for each neighbor of the current vertex and add it to heap queue\n for place in path.not_visited:\n new_path = copy.deepcopy(path)\n new_path.add(place)\n add_to_heap_queue(new_path)",
"def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #bfs uses a queue\n frontier.push(initialNode, initialNode.pathCost + heuristic(initialNode.state, problem)) #we use f(n) = pathCost + h(n) for the best solution\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost + heuristic(child.state, problem))\n return []\n util.raiseNotDefined()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (priority queue y set)\n openNodes = util.PriorityQueue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Calculamos funcion heuristica y el coste acumulado para sacar la funcion de evaluacion del nodo inicial\n fn = problem.getCostOfActions(node.path) + nullHeuristic(node.name, problem);\n\n #Lo metemos en la cola con su funcion de evaluacion como prioridad\n openNodes.push(node, fn)\n\n #Iteramos para cada nodo\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #sacamos el nodo de arriba de la cola\n node = openNodes.pop()\n if problem.isGoalState(node.name): #Comprobamos si el nodo es Goal. Si lo es terminamos.\n break\n else: #Expandimos los nodos sucesores del nodo si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n fn = problem.getCostOfActions(findPath(succNode)) + nullHeuristic(succNode.name, problem);\n openNodes.push(succNode, fn)\n #Metemos el nodo en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)",
"def next_node_dfs(search_state, last_node_is_ok):\n log_T, initial_state, min_score, max_depth, maxtraversals, node, node_idx, it, order, score, sub_info = search_state\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n n_states = log_T.shape[0]\n if it == maxtraversals:\n assert False, \"Number of traversals exceeded\"\n while True:\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if last_node_is_ok and min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return [-1], score, search_state # end of the generator, can't increase even the root\n last_node_is_ok = True # We can now make progress again, regardless of whether we could at the beginning\n it += 1\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return list(node), score, search_state # the invocation to list here is to make a copy, don't remove!",
"def uniformCostSearch(problem):\n pq = util.PriorityQueue()\n startState = problem.getStartState()\n pq.push((\"\", None, startState), 0)\n # moves[state] returns (dir, parent) the direction and the parent from which this state was reached\n moves = {}\n # Current cost to reach this state\n stateCost = {startState: 0}\n goalState = None\n while not pq.isEmpty():\n (move, parent, currentState) = pq.pop()\n if currentState in moves: # already visited\n continue\n moves[currentState] = (move, parent)\n if problem.isGoalState(currentState): # Found goal state\n goalState = currentState\n break\n children = problem.getSuccessors(currentState)\n for successor, action, cost in children:\n if successor not in stateCost or stateCost[successor] > stateCost[currentState] + cost:\n pq.push((action, currentState, successor), stateCost[currentState] + cost)\n stateCost[successor] = stateCost[currentState] + cost\n\n currentState = goalState\n path = []\n while currentState != startState:\n (move, parent) = moves[currentState]\n path.append(move)\n currentState = parent\n path.reverse()\n return path",
"def solve_tsp(dist):\n\n # number of nodes\n N = dist.shape[0]\n\n # tsp path for quick calculation of cost\n ii = np.arange(N)\n jj = np.hstack((np.arange(1, N), 0))\n\n # for each node, a sorted list of closest nodes\n dsort = [np.argsort(d) for d in dist]\n dsort = [d[d != i] for i, d in enumerate(dsort)]\n\n # randomly initialize path through graph\n path = np.random.permutation(N)\n idx = np.argsort(path)\n cost = np.sum(dist[path[ii], path[jj]])\n \n # keep track of objective function over time\n cost_hist = [cost]\n\n # optimization loop\n node = 0\n while node < N:\n\n # we'll try breaking the connection i -> j\n i = path[node]\n j = path[(node+1) % N]\n \n # since we are breaking i -> j we can remove the cost of that connection\n c = cost - dist[i, j]\n\n # search over nodes k that are closer to j than i\n for k in dsort[j]:\n # can safely continue if dist[i,j] < dist[k,j] for the remaining k\n if k == i:\n node += 1\n break\n\n # break connection k -> p\n # add connection j -> p\n # add connection i -> k\n p = path[(idx[k]+1) % N]\n new_cost = c - dist[k,p] + dist[j,p] + dist[i,k]\n\n # if this swap improves the cost, implement it and move to next i\n if new_cost < cost:\n path = reverse_segment(path, idx[j], idx[k])\n idx = np.argsort(path)\n # make sure that we didn't screw up\n assert np.abs(np.sum(dist[path[ii], path[jj]]) - new_cost) < 1e-6\n cost = new_cost\n # restart from the begining of the graph\n cost_hist.append(cost)\n node = 0\n break\n\n return path, cost_hist",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n q_p=util.PriorityQueue()\n #nodes=[]\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n node['cost']=0\n \n q_p.push(node,node['cost'])\n lis.append(node)\n \n while q_p.isEmpty()!=True:\n node=q_p.pop()\n print node\n state=node['state']\n visited.append(state)\n #lis.append(node)\n if problem.isGoalState(state):\n print \"found\"\n return getPath(problem,node)\n \n suc=problem.getSuccessors(state)\n if suc ==None:\n continue \n for child in suc:\n if child[0] not in visited:\n childnode={}\n childnode['parent']=state\n childnode['direction']=child[1]\n childnode['state']=child[0]\n childnode['cost']=node['cost']+1\n q_p.push(childnode,childnode['cost'])\n lis.append(childnode)\n \n\n \n\n\n\n\n \n\n \n \n #util.raiseNotDefined()",
"def _leaf_insertion_cost(x_y_z_array, dist_matrix, leaf_i, leaves, query_name, orig_njt):\n x,y,z = x_y_z_array\n\n all_leaves_sum = 0.\n for leaf_j in leaves:\n if leaf_j == leaf_i:\n continue\n else:\n all_leaves_sum += orig_njt.orig_dist_matrix.at[leaf_j, query_name] - (x + z)\n\n return all_leaves_sum**2.",
"def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Get the start node\n start_state = problem.getStartState()\n print(start_state)\n\n # Define a stack\n plan_stack = util.Queue()\n start_plan = [start_state] # node, cost\n plan_stack.push(start_plan)\n\n # Visited nodes\n visited_nodes = set(start_state)\n\n goal_found = False\n\n while not goal_found:\n # Get the plan from the stack\n plan_to_expand = plan_stack.pop()\n node_to_exp = plan_to_expand[-1]\n all_nxt_nodes = problem.getSuccessors(node_to_exp)\n\n # Traverse through all the next nodes\n for nxt_node in all_nxt_nodes:\n nxt_pos = nxt_node[0]\n\n if nxt_pos in visited_nodes: # Check if node is already visited\n continue\n\n visited_nodes.add(nxt_pos) # Add the node to visited nodes\n nxt_plan = plan_to_expand + [nxt_pos] # add node to the plan\n plan_stack.push(nxt_plan) # push the plan into the stack\n goal_found = problem.isGoalState(nxt_pos) # Check if goal is achieved\n if goal_found:\n break\n \n \n print(goal_found)\n print(nxt_plan)\n\n moves = []\n # Convert plan to moves\n for i in range(len(nxt_plan) - 1):\n for nxt_node in problem.getSuccessors(nxt_plan[i]):\n nxt_pos = nxt_node[0]\n nxt_mv = nxt_node[1]\n if nxt_pos == nxt_plan[i+1]:\n moves.append(nxt_mv)\n break\n \n return moves\n\n \n\n # Calculate the minimum plan cost \n #min_val = float(\"inf\")\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost < min_val:\n # min_val = plan_cost\n\n ## Expand the nodes with minimum plan cost\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost == min_val:\n # plan_step = one_plan[0] \n # # Expand the last node of plan\n # last_node = plan_step[end]\n # for nxt_node in problem.getSuccessors(last_node):\n\n\n\n util.raiseNotDefined()",
"def cons_heuristic(state, goal_state): \n unused_stacks = set(range(len(state.stack_containers))) - set(goal_state.keys())\n unused_index = list(unused_stacks)[0] if unused_stacks else -1\n cost = 0\n height_dict = {} # Dict for storing element and height index in goal state\n for i in range(len(state.stack_containers)):\n for element in state.stack_containers[i]:\n h_stack = get_heurisitc_cost_stacks(element, i, goal_state, unused_stacks, unused_index)\n cost += h_stack[0] \n if h_stack[1] > -1:\n height_dict[element] = h_stack[1] \n for i in range(len(state.stack_containers)):\n for j in range(len(state.stack_containers[i])):\n c = get_heuristic_cost_height(state.stack_containers[i][j], j, height_dict)\n cost += get_heuristic_cost_height(state.stack_containers[i][j], j, height_dict) \n return cost",
"def cost(self) -> float:",
"def uniformCostSearch(problem):\n # Initialization\n startState = problem.getStartState()\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n closedSet = set()\n queue = util.PriorityQueue()\n queue.push((startState, None, 0), 0)\n cameFrom = dict() # Stores most efficient previous action\n gScore = dict() # Stores current cost from start\n gScore[startState] = 0\n\n # Search\n while queue.heap: # Do while open set is not empty\n (currentState, action, cost) = queue.pop()\n\n if problem.isGoalState(currentState):\n # Goal reached. Construct path\n path = util.Queue() \n \n # Backtrack to start state\n while currentState is not startState and currentState in cameFrom:\n currentState, action = cameFrom[currentState]\n path.push(action)\n\n return path.list\n\n # Expand current state\n closedSet.add(currentState) \n for successor in problem.getSuccessors(currentState):\n successorState, successorAction, successorCost = successor\n \n if successorState in closedSet:\n continue # Skip already expanded states\n \n # Initialize entries not already in dictionaries to a big number\n if currentState not in gScore:\n gScore[currentState] = 999999999999\n if successorState not in gScore:\n gScore[successorState] = 999999999999\n\n # Compare this path to best path\n gTentative = gScore[currentState] + successorCost\n if gTentative >= gScore[successorState]:\n continue # Not a better path\n\n # A better path is found, store this path\n cameFrom[successorState] = (currentState, successorAction)\n gScore[successorState] = gTentative # Store new cost\n # Update the priority queue\n queue.update(successor, gScore[successorState])",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()",
"def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()",
"def search(state, goal_state):\n\n def gn(node):\n return node.gn()\n\n tiles_places = []\n for i in range(len(goal_state)):\n for j in range(len(goal_state)):\n heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))\n\n def hn(node):\n cost = 0\n for i in range(len(node.state)):\n for j in range(len(node.state)):\n tile_i, tile_j = tiles_places[node.state[i][j]][1]\n if i != tile_i or j != tile_j:\n cost += abs(tile_i - i) + abs(tile_j - j)\n return cost\n\n def fn(node):\n return gn(node) + hn(node)\n\n return bfs.search(state, goal_state, fn)",
"def get_cost(self) -> float:\n return math.e / self.fitness",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n fringe = util.PriorityQueue()\n cost = 0 \n visitedNodes = []\n actions = []\n \n \"\"\" \n Format of Priority Queue :\n (item , priority)\n item => state , actions , cost\n priorityQueue.push ( (state , actions , cost) , cost )\n \n \"\"\"\n \n if ( problem.isGoalState(startState) ):\n return actions\n else :\n newNode = startState , actions , cost\n priority = cost\n fringe.push( newNode , priority )\n while ( fringe.isEmpty() == False ):\n currentState , actions , cost = fringe.pop()\n if ( problem.isGoalState(currentState) == True ) :\n #print(\"Final Path : \" + str(actions))\n return actions\n else :\n if ( (currentState in visitedNodes) == False ):\n visitedNodes.append(currentState)\n currentStateSuccessors = problem.getSuccessors(currentState)\n for node in currentStateSuccessors :\n state , action , stateCost = node\n if( ( state in visitedNodes) == False ) :\n newNode = state , actions + [action] , cost + stateCost\n priority = cost + stateCost\n fringe.push( newNode , priority )\n util.raiseNotDefined()",
"def select_final(self):\n best_qsa_star = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n if qsa_star > best_qsa_star:\n best_qsa_star = qsa_star\n best_node = c\n return best_node.action",
"def aStarSearch(problem):\n \"*** YOUR CODE HERE ***\"\n root = problem.getStartState()\n pq = PriorityQueue()\n visited = [] # list of visited nodes to not visit again\n path = {} # to store predecessor and next move of nodes\n gn={} # for total distance\n\n '''intializing for root'''\n pq.push(root,0)\n path[root]=(None,None) # (predecessor, action)\n gn[root]=0\n\n\n while not pq.isEmpty():\n # print(path)\n node = pq.pop()\n successors = problem.getSuccessors(node)\n\n if problem.isGoalState(node):\n p=[root]\n while path[node]!=(None,None): #moving from goal to root\n p.append(node)\n node = path[node][0]\n return p\n\n visited.append(node)\n\n for i in successors:\n i_successor = i[0]\n i_action=i[1]\n i_cost=gn[node]+ i[3] # i_cost is total gn\n if i_successor not in visited:\n if (i_successor not in gn) or (gn[i_successor]>i_cost): #dijkstra property too\n path[i_successor]=(node,i_action) # (predecessor, action)\n gn[i_successor]=i_cost\n fn = i_cost + problem.getHeuristic(i_successor)\n pq.update(i_successor,fn)",
"def uniformCostSearch(problem):\n stack = PriorityQueue()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state, 0)\n actions_dict = dict()\n final_actions = []\n discovered = [problem.getStartState]\n cost_dict = dict()\n cost_dict[start_state] = 0\n parent_dict[start_state] = (420,420)\n cost_dict[(420,420)] = 0\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n\n if current_state not in visited:\n\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n successors = problem.getSuccessors(current_state)\n for s in successors:\n if s[0] not in visited:\n if s[0] not in cost_dict:\n cost_dict[s[0]] = cost_dict[current_state] + s[2]\n stack.push(s[0], cost_dict[s[0]]+1)\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n discovered.append(s[0])\n elif cost_dict[current_state] + s[2] < cost_dict[s[0]]:\n cost_dict[s[0]] = cost_dict[current_state] + s[2]\n parent_dict[s[0]] = current_state\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n\n while current_state is not start_state:\n parent = parent_dict[current_state]\n final_actions.append(actions_dict[parent, current_state])\n current_state = parent\n final_actions.reverse()\n\n return final_actions"
]
| [
"0.68802005",
"0.6709244",
"0.62288725",
"0.6184886",
"0.6125053",
"0.60624397",
"0.59995437",
"0.5982352",
"0.59660757",
"0.5932713",
"0.5929007",
"0.5865698",
"0.5860287",
"0.5847322",
"0.58336675",
"0.5827743",
"0.58238465",
"0.58049667",
"0.5789467",
"0.5785034",
"0.5772391",
"0.57531613",
"0.5743322",
"0.5737745",
"0.5736831",
"0.57147336",
"0.57071465",
"0.5702781",
"0.56884754",
"0.5683415"
]
| 0.6960742 | 0 |
\Theta(n^2) method to calculate the expected cost of the optimal BST. Makes use of Knuth's conclusion that a root matrix exists such that root[i][j-1] <= root[i][j] <= root[i+1][j] for all i < j. | def get_optimal_bst_fast(p, q):
assert p is not None
assert q is not None
assert len(p) == len(q)
n = len(p) - 1
assert n >= 0
e = [[-1 for _ in range(0, n + 1)] for _ in range(0, n + 2)]
w = [[-1 for _ in range(0, n + 1)] for _ in range(0, n + 2)]
root = [[-1 for _ in range(0, n)] for _ in range(0, n)]
for i in range(1, n + 1):
root[i - 1][i - 1] = i
for i in range(1, n + 2):
e[i][i - 1] = q[i - 1]
w[i][i - 1] = q[i - 1]
# Calculate w matrix first.
for i in range(n + 1, 0, -1):
for j in range(i, n + 1):
w[i][j] = w[i][j - 1] + p[j] + q[j]
for i in range(1, n + 1):
e[i][i] = e[i][i - 1] + e[i + 1][i] + w[i][i]
for j_i_diff in range(1, n):
i = 1
r = root[0][j_i_diff - 1]
while i + j_i_diff <= n:
j = i + j_i_diff
e_cost = e[i][r - 1] + e[r + 1][j] + w[i][j]
if e[i][j] < 0 or e_cost < e[i][j]:
e[i][j] = e_cost
root[i - 1][j - 1] = r
if r == root[i][j - 1]:
i += 1
else:
r += 1
return e[1][n], root | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_optimal_bst(p, q):\n\n assert p is not None\n assert q is not None\n assert len(p) == len(q)\n n = len(p) - 1\n assert n >= 0\n\n if n == 0:\n return 1.0, []\n\n e = [[-1 for _ in range(0, n + 1)] for _ in range(0, n + 1)]\n root = [[-1 for _ in range(0, n)] for _ in range(0, n)]\n\n for i in range(1, n + 1):\n e[i][i] = (q[i - 1] + q[i]) * 2 + p[i]\n root[i - 1][i - 1] = i\n\n for i in range(n + 1, 0, -1):\n for j in range(i + 1, n + 1):\n w = sum(q[i - 1:j + 1]) + sum(p[i:j + 1])\n min_cost = sys.maxsize\n current_root = -1\n for r in range(i, j + 1):\n assert (r == i or e[i][r - 1] > 0) and (r == j or e[r + 1][j] > 0)\n e_left = q[i - 1] if r == i else e[i][r - 1]\n e_right = q[j] if r == j else e[r + 1][j]\n e_cost = w + e_left + e_right\n\n if e_cost < min_cost:\n min_cost = e_cost\n current_root = r\n\n e[i][j] = min_cost\n root[i - 1][j - 1] = current_root\n\n return e[1][n], root",
"def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]",
"def breadth_first_search(root_node):\n if root_node.goal_test():\n return root_node\n\n frontier = [root_node]\n explored = []\n\n while frontier:\n node = frontier.pop(0)\n explored.append(node)\n\n for successor in node.generate_successors():\n if not successor:\n continue\n if not (successor.is_in(frontier) and successor.is_in(explored)):\n if successor.goal_test():\n return successor\n frontier.append(successor)\n return None # No Solution",
"def Trees__CheckBST():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:https://www.hackerrank.com/challenges/ctci-is-binary-search-tree/problem\n # O(n) solution. Passes all test cases.\n # Tricky part with leaf in left side of root being bigger than root. (or right/smaller)\n # E.g:\n # 3\n # 2 6\n # 1 4 5 7\n # Note, the 4 is bigger than the parent 3. But in a proper BST parent\n # must be bigger than all items on parent's left side.\n # Keep track of last biggest element as we descend to children (& last smallest element)\n # val < last biggest (last_left) #for cases like 4\n # val > last smallest (last_right) # for mirror side.\n # Convieniently, this also ensures uniqueness.\n def checkBST(root):\n queue = []\n queue.append((root, None, None)) # node, last_left, last_right.\n while queue:\n node, last_left, last_right = queue.pop()\n if not node:\n continue\n if last_left and not node.data < last_left \\\n or last_right and not node.data > last_right:\n return False\n queue.append((node.left, node.data, last_right))\n queue.append((node.right, last_left, node.data))\n return True",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Get the start node\n start_state = problem.getStartState()\n print(start_state)\n\n # Define a stack\n plan_stack = util.Queue()\n start_plan = [start_state] # node, cost\n plan_stack.push(start_plan)\n\n # Visited nodes\n visited_nodes = set(start_state)\n\n goal_found = False\n\n while not goal_found:\n # Get the plan from the stack\n plan_to_expand = plan_stack.pop()\n node_to_exp = plan_to_expand[-1]\n all_nxt_nodes = problem.getSuccessors(node_to_exp)\n\n # Traverse through all the next nodes\n for nxt_node in all_nxt_nodes:\n nxt_pos = nxt_node[0]\n\n if nxt_pos in visited_nodes: # Check if node is already visited\n continue\n\n visited_nodes.add(nxt_pos) # Add the node to visited nodes\n nxt_plan = plan_to_expand + [nxt_pos] # add node to the plan\n plan_stack.push(nxt_plan) # push the plan into the stack\n goal_found = problem.isGoalState(nxt_pos) # Check if goal is achieved\n if goal_found:\n break\n \n \n print(goal_found)\n print(nxt_plan)\n\n moves = []\n # Convert plan to moves\n for i in range(len(nxt_plan) - 1):\n for nxt_node in problem.getSuccessors(nxt_plan[i]):\n nxt_pos = nxt_node[0]\n nxt_mv = nxt_node[1]\n if nxt_pos == nxt_plan[i+1]:\n moves.append(nxt_mv)\n break\n \n return moves\n\n \n\n # Calculate the minimum plan cost \n #min_val = float(\"inf\")\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost < min_val:\n # min_val = plan_cost\n\n ## Expand the nodes with minimum plan cost\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost == min_val:\n # plan_step = one_plan[0] \n # # Expand the last node of plan\n # last_node = plan_step[end]\n # for nxt_node in problem.getSuccessors(last_node):\n\n\n\n util.raiseNotDefined()",
"def find_best_path(self, root):\n number_vertices = len(self.states)\n distances = [-float(\"inf\")] * number_vertices\n distances[root] = 0\n predecessors = [None] * number_vertices\n\n for _ in range(number_vertices - 1):\n for origin in range(number_vertices):\n for (target, value) in self.transitions[origin]:\n if distances[target] < distances[origin] + value:\n distances[target] = distances[origin] + value\n predecessors[target] = origin\n\n # compute the vertices with the highest value, excluding the root\n distances[root] = -float(\"inf\")\n most_valued_vertices = np.nonzero(distances == np.max(distances))[0]\n # choose at *random* among the most valuable vertices\n most_valued_vertex = np.random.choice(most_valued_vertices)\n return most_valued_vertex, predecessors",
"def _leaf_insertion_cost(x_y_z_array, dist_matrix, leaf_i, leaves, query_name, orig_njt):\n x,y,z = x_y_z_array\n\n all_leaves_sum = 0.\n for leaf_j in leaves:\n if leaf_j == leaf_i:\n continue\n else:\n all_leaves_sum += orig_njt.orig_dist_matrix.at[leaf_j, query_name] - (x + z)\n\n return all_leaves_sum**2.",
"def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()",
"def _find_root(function, N, squared_integers, grid_data_dct2):\n\n # From the implementation by Botev, the original paper author\n # Rule of thumb of obtaining a feasible solution\n N2 = tf.math.maximum(\n tf.math.minimum(tf.constant(1050, ztypes.float), N),\n tf.constant(50, ztypes.float),\n )\n tol = 10e-12 + 0.01 * (N2 - 50) / 1000\n left_bracket = tf.constant(0.0, dtype=ztypes.float)\n right_bracket = tf.constant(10e-12, ztypes.float) + tf.constant(\n 0.01, ztypes.float\n ) * (N2 - tf.constant(50, ztypes.float)) / tf.constant(1000, ztypes.float)\n\n converged = tf.constant(False)\n t_star = tf.constant(0.0, dtype=ztypes.float)\n\n def fixed_point_function(t):\n return _fixed_point(t, N, squared_integers, grid_data_dct2)\n\n def condition(right_bracket, converged, t_star):\n return tf.math.logical_not(converged)\n\n def body(right_bracket, converged, t_star):\n t_star, value_at_t_star, num_iterations, converged = root_search.brentq(\n fixed_point_function, left_bracket, right_bracket, None, None, 2e-12\n )\n\n t_star = t_star - value_at_t_star\n\n right_bracket = right_bracket * tf.constant(2.0, ztypes.float)\n\n return right_bracket, converged, t_star\n\n # While a solution is not found, increase the tolerance and try again\n right_bracket, converged, t_star = tf.while_loop(\n condition, body, [right_bracket, converged, t_star]\n )\n\n return t_star",
"def breadthFirstSearch(initialState, finalState):\n\n def exploreNext(neighbor, move):\n \"\"\"Finds out if the neighbor is withinf the boundaries and explore it.\n `explored` is the set used in the BFS function.\n `stateQueue` is the queue inside the BFS function.\n `currentState` is each visited node inside the loop of the BFS function.\n\n \"\"\"\n if (neighbor != None and tuple(neighbor) not in explored):\n nextState = State(neighbor)\n nextState.path = currentState.path.copy()\n nextState.path.append(move)\n stateQueue.append(nextState)\n\n stateQueue = deque([]) # List of States\n explored = set() # Set of tuples of each visited state of the puzzle\n sizeBytesCounter = 0\n\n # Init queue\n stateQueue.append(State(initialState))\n\n # while queue is not empty\n while stateQueue:\n currentState = stateQueue.popleft()\n sizeBytesCounter += sys.getsizeof(currentState)\n\n # Add an unmodified list to the set, a tuple\n explored.add(tuple(currentState.puzzle))\n\n if finalState == currentState.puzzle:\n return currentState, explored, sizeBytesCounter\n \n # Create a node of the current state\n currentNode = Node(currentState.puzzle)\n\n # Iterate over posible paths\n exploreNext(*currentNode.up())\n exploreNext(*currentNode.down())\n exploreNext(*currentNode.left())\n exploreNext(*currentNode.right())\n \n return None",
"def next_node_dfs(search_state, last_node_is_ok):\n log_T, initial_state, min_score, max_depth, maxtraversals, node, node_idx, it, order, score, sub_info = search_state\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n n_states = log_T.shape[0]\n if it == maxtraversals:\n assert False, \"Number of traversals exceeded\"\n while True:\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if last_node_is_ok and min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return [-1], score, search_state # end of the generator, can't increase even the root\n last_node_is_ok = True # We can now make progress again, regardless of whether we could at the beginning\n it += 1\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return list(node), score, search_state # the invocation to list here is to make a copy, don't remove!",
"def best_plan(self, tree=None):\n if tree is None:\n tree = self.root\n\n if not tree.children:\n\n agent_plans = {}\n agent_costs = {}\n\n for agent in range(len(tree.starts)):\n end_nodes, costs = tree.return_end_nodes(agent, only_lowest_costs=True)\n\n if tree == self.root:\n agent_plans[agent] = end_nodes\n agent_costs[agent] = costs\n\n else:\n # sum the costs for each plan, and pair end nodes for each plan\n costs_temp = []\n plans = []\n\n for i in range(tree.N_goal_states ** (tree.hierarchy_number - 1)):\n cost_temp = costs[tree.N_goal_states * i:tree.N_goal_states * (i + 1)]\n cost_temp_sum = np.sum(cost_temp)\n costs_temp.append(cost_temp_sum)\n\n plan_temp = end_nodes[tree.N_goal_states * i:tree.N_goal_states * (i + 1)]\n plans.append(plan_temp)\n\n costs = costs_temp\n\n agent_plans[agent] = plans\n agent_costs[agent] = costs\n\n if tree == self.root:\n return agent_plans\n else:\n return agent_plans, agent_costs\n\n else: # tree has children\n agent_child_plans, agent_child_costs = {i: [] for i in range(len(tree.starts))}, {i: [] for i in\n range(len(tree.starts))}\n for child in tree.children:\n child_plan, child_cost = self.best_plan(child)\n\n for agent in range(len(tree.starts)):\n agent_child_plans[agent].append(child_plan[agent])\n agent_child_costs[agent].append(child_cost[agent])\n\n agent_no_obs_ends, agent_no_obs_costs = {}, {}\n for agent in range(len(tree.starts)):\n no_obs_end, no_obs_cost = tree.return_end_nodes(agent, only_lowest_costs=True)\n agent_no_obs_ends[agent] = no_obs_end\n agent_no_obs_costs[agent] = no_obs_cost\n\n if tree == self.root:\n\n agent_best_child_costs = {i: np.inf for i in range(len(tree.starts))}\n agent_best_child_plans = {i: None for i in range(len(tree.starts))}\n\n for i in range(\n len(agent_child_plans[0])): # All agents have same length plan, so we can look at agent 0\n child = tree.children[i]\n\n all_agent_costs = {}\n\n for agent in range(len(tree.starts)):\n cost = agent_child_costs[agent][i]\n\n obs_node = child.starts[agent]\n cost_to_obs = obs_node.parent.path_costs.copy() + obs_node.parent.node_costs.copy()\n\n if (len(cost_to_obs) != 1) or (len(cost) != 1):\n print('Error1')\n\n cost = np.array(cost) + np.array(cost_to_obs)\n\n all_agent_costs[agent] = np.sum(cost) # np.sum to remove list/array format\n\n if np.sum(list(all_agent_costs.values())) < np.sum(\n list(agent_best_child_costs.values())): # Sum to remove list items\n for agent in range(len(tree.starts)):\n agent_best_child_costs[agent] = all_agent_costs[agent]\n for agent in range(len(tree.starts)):\n agent_best_child_plans[agent] = agent_child_plans[agent][i].copy()\n\n # Remove list so that cost is not [cost] for no_obs\n for agent in range(len(tree.starts)):\n agent_no_obs_costs[agent] = np.sum(agent_no_obs_costs[agent])\n\n if np.sum(list(agent_best_child_costs.values())) < np.sum(list(agent_no_obs_costs.values())):\n return agent_best_child_plans\n\n else:\n return agent_no_obs_ends\n\n else: # tree is not root\n\n agent_final_plans = {agent: [[] for _ in range(tree.N_goal_states ** (tree.hierarchy_number - 1))] for\n agent in range(len(tree.starts))}\n agent_final_costs = {agent: [[] for _ in range(tree.N_goal_states ** (tree.hierarchy_number - 1))] for\n agent in range(len(tree.starts))}\n\n for i in range(tree.N_goal_states ** (tree.hierarchy_number - 1)):\n for j in range(tree.N_goal_states):\n\n all_agent_costs_no_obs = {}\n all_agent_plans_no_obs = {}\n\n best_child_cost = {i: np.inf for i in range(len(tree.starts))}\n best_child_plan = {}\n\n for 
agent in range(len(tree.starts)):\n end_no_obs, cost_no_obs = agent_no_obs_ends[agent][i + j], agent_no_obs_costs[agent][i + j]\n\n all_agent_costs_no_obs[agent] = cost_no_obs\n all_agent_plans_no_obs[agent] = end_no_obs\n\n for k in range(len(agent_child_plans[0])): # All agent has same len child plans\n child = tree.children[k]\n all_agent_costs = {}\n for agent in range(len(tree.starts)):\n # plan = agent_child_plans[agent][k][i + j]\n cost = agent_child_costs[agent][k][i + j]\n\n obs_node = child.starts[agent]\n cost_to_obs = obs_node.parent.path_costs[0][i + j].copy() + \\\n obs_node.parent.node_costs[0][\n i + j].copy()\n\n cost = np.array(cost) + np.array(cost_to_obs)\n\n all_agent_costs[agent] = np.sum(cost)\n\n if np.sum(list(all_agent_costs.values())) < np.sum(list(best_child_cost.values())):\n best_child_cost = all_agent_costs\n\n for agent in range(len(tree.starts)):\n best_child_plan[agent] = agent_child_plans[agent][k][i + j]\n\n if np.sum(list(best_child_cost.values())) < np.sum(list(all_agent_costs_no_obs.values())):\n for agent in range(len(tree.starts)):\n agent_final_costs[agent][i].append(best_child_cost[agent])\n agent_final_plans[agent][i].append(best_child_plan[agent])\n else:\n for agent in range(len(tree.starts)):\n agent_final_costs[agent][i].append(all_agent_costs_no_obs[agent])\n agent_final_plans[agent][i].append(all_agent_plans_no_obs[agent])\n\n ## \"Merge\" final_costs\n agent_costs_temp = {i: [] for i in range(len(tree.starts))}\n\n for agent in range(len(tree.starts)):\n # agent_costs_temp[agent].append(np.sum(agent_final_costs[agent])) TODO: Remove this line? This is wrong?\n for cost in agent_final_costs[agent]:\n agent_costs_temp[agent].append(np.sum(cost))\n\n agent_final_costs = agent_costs_temp\n\n return agent_final_plans, agent_final_costs",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Queue()\n\n # Retrieve the init state\n init = (problem.getStartState(), ['Stop'], 0)\n open.push(init)\n closed = []\n while not open.isEmpty():\n currNode = open.pop()\n currState = currNode[0]\n currPath = currNode[1]\n currCost = currNode[2]\n\n if problem.isGoalState(currState):\n return currPath[1:]\n else:\n if currState not in closed:\n closed.append(currState)\n successors = problem.getSuccessors(currState)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath + [each[1]], currCost + each[2])\n open.push(temp)\n return False",
"def minimumEffortPath(self, heights: List[List[int]]) -> int:\n m, n = len(heights), len(heights[0])\n\n def diff(i, j, _i, _j):\n return abs(heights[i][j] - heights[_i][_j])\n\n max_diff = 0\n for i in range(m):\n for j in range(n):\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n max_diff = max(max_diff, diff(i, j, _i, _j))\n\n @lru_cache(None)\n def dfs(i, j, remain, k):\n if i == m-1 and j == n-1:\n return True\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n bit = 1<<(_i*n + _j)\n if remain&bit and diff(i, j, _i, _j) <= k:\n if dfs(_i, _j, remain^bit, k):\n return True\n return False\n\n def bisearch(s, e, func):\n while s <= e:\n p = s + (e-s)//2\n if func(p):\n e = p-1\n else:\n s = p+1\n return e+1\n\n return bisearch(0, max_diff, lambda k: dfs(0, 0, (1<<(m*n))-1, k))",
"def minimumEffortPath(self, heights: List[List[int]]) -> int:\n m, n = len(heights), len(heights[0])\n\n def diff(i, j, _i, _j):\n return abs(heights[i][j] - heights[_i][_j])\n\n max_diff = 0\n for i in range(m):\n for j in range(n):\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n max_diff = max(max_diff, diff(i, j, _i, _j))\n\n def dfs(i, j, visited, k):\n if i == m-1 and j == n-1:\n return True\n visited.add((i, j))\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n and (_i, _j) not in visited and diff(i, j, _i, _j) <= k:\n if dfs(_i, _j, visited, k):\n return True\n return False\n\n def bisearch(s, e, func):\n while s <= e:\n p = s + (e-s)//2\n if func(p):\n e = p-1\n else:\n s = p+1\n return e+1\n\n return bisearch(0, max_diff, lambda k: dfs(0, 0, set(), k))",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()",
"def grid_cost(grid):\r\n cashe = [[None for x in range(len(grid[0]))] for y in range(len(grid))]\r\n n_rows = len(grid)\r\n n_cols = len(grid[0])\r\n \r\n def cell_cost(row, col):\r\n \"\"\"The cost of getting to a given cell in the current grid.\"\"\"\r\n if row < 0 or row >= n_rows or col < 0 or col >= n_cols:\r\n return INFINITY # Off grid cells are treated as infinities\r\n elif cashe[row][col] is None:\r\n cost = grid[row][col]\r\n if row != 0:\r\n doom = [cell_cost(row - 1, col + delta_col) for delta_col in range(-1, 2)]\r\n cost += min(doom)\r\n cashe[row][col] = cost\r\n return cashe[row][col]\r\n else:\r\n return cashe[row][col]\r\n \r\n best = min(cell_cost(n_rows - 1, col) for col in range(n_cols))\r\n return best",
"def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None",
"def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 1e10",
"def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n if c < cols-1:\n G.add_edge(r*cols + c, r*cols + c + 1, weight=matrix[r][c+1])\n if 0 < r:\n G.add_edge(r*cols + c, (r-1)*cols + c, weight=matrix[r-1][c])\n if r < rows-1:\n G.add_edge(r*cols + c, (r+1)*cols + c, weight=matrix[r+1][c])\n # Calculate shortest path\n path = nx.shortest_path(G, 0, rows*cols-1, weighted=True)\n \n # Get cost for path\n s = 0\n for p in path:\n c = p % cols\n r = (p - c) / rows\n s += matrix[r][c]\n return s",
"def AStar(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"AStar: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"AStar: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n # We can use a simple visited matrix since the heuristic (euclidean distance) is both admissible AND consistent\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n g_cost = [[float('inf') for i in range(n)] for j in range(n)] # Initialize a matrix of the same size as maze where each value is 'infinity'.\n # f_cost = [[float('inf') for i in range(n)] for j in range(n)] # Initialize a matrix of the same size as maze where each value is 'infinity'.\n previous = [[None for i in range(n)] for j in range(n)] # Initialize a matrix of the same size as maze where each value is None.\n\n heap = [] # Define our 'heap' which is just a list, but all pushes and pops will be through the heapq library.\n \n heapq.heappush(heap, (0, start)) # Push our start onto the heap. It's ok for this to have 0 'f' value since it'll be immediately popped off anyway.\n g_cost[start[0]][start[1]] = 0\n # f_cost[start[0]][start[1]] = euclidean_distance(start, goal)\n\n while (len(heap)): # While there exists items in the queue\n min_value = heapq.heappop(heap) # Pop the square with lowest 'f' value from our heap.\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n # if (visited[current[0]][current[1]] == False): # If we have not visited this node\n # visited[start[0]][start[1]] = 1 # Set it to visited\n\n current_f, current = min_value\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n if (maze[possible[0]][possible[1]]): # If there is something there\n continue\n # Check to see if this path is better (just need to check g_cost since h_cost is always the same)\n possible_g_cost = g_cost[current[0]][current[1]] + 1\n if (possible_g_cost < g_cost[possible[0]][possible[1]]): # If the cost is indeed less\n previous[possible[0]][possible[1]] = current\n g_cost[possible[0]][possible[1]] = possible_g_cost\n # Check to see if the node is in the heap, and if it is not, put it in.\n if (not visited[possible[0]][possible[1]]):\n heapq.heappush(heap, (possible_g_cost + euclidean_distance(possible, goal), possible))\n visited[possible[0]][possible[1]] = 1\n \n # found = False\n # for (f_cost, (square_i, square_j)) in heap:\n # if 
(square_i == possible[0] and square_j == possible[1]):\n # found = True\n # break\n # if (not found):\n # heapq.heappush(heap, (possible_g_cost + euclidean_distance(possible, goal), possible))\n\n # if (visited[possible[0]][possible[1]]): # If this node has already been visited\n # # Check to see if this path is better (just need to check g_cost since h_cost is always the same)\n # if (f_cost[possible[0]][possible[1]] > possible_f_cost):\n # heapq.heappush(heap, (possible_f_cost, possible)) # Push this back onto the heap for re-examination\n # f_cost[possible[0]][possible[1]] = possible_f_cost # Assign the new f-cost\n # previous[possible[0]][possible[1]] = current # Update previous\n # else\n return (False, [], number_of_nodes_visited) # If the while loop goes out, and the queue is empty, then there is no possible path",
"def monteCarloTreeSearch(chessboard):\n init_state = NodeState(chessboard)\n init_state.setCurrentTurn(State.BLACK)\n init_node = TreeNode(init_state)\n\n for _ in range(COMPUTATION_LIMIT):\n expanded_node = treePolicy(init_node)\n reward = defaultPolicy(expanded_node)\n backPropagation(expanded_node, reward)\n \n best_child_node = findBestChild(init_node, False)\n return best_child_node.getState().getBestMovement()",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #ucs uses a priority queue\n frontier.push(initialNode, initialNode.pathCost)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost) #we only check if state is in explored because update does the other\n return []\n util.raiseNotDefined()",
"def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index {0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes",
"def solve_tsp(dist):\n\n # number of nodes\n N = dist.shape[0]\n\n # tsp path for quick calculation of cost\n ii = np.arange(N)\n jj = np.hstack((np.arange(1, N), 0))\n\n # for each node, a sorted list of closest nodes\n dsort = [np.argsort(d) for d in dist]\n dsort = [d[d != i] for i, d in enumerate(dsort)]\n\n # randomly initialize path through graph\n path = np.random.permutation(N)\n idx = np.argsort(path)\n cost = np.sum(dist[path[ii], path[jj]])\n \n # keep track of objective function over time\n cost_hist = [cost]\n\n # optimization loop\n node = 0\n while node < N:\n\n # we'll try breaking the connection i -> j\n i = path[node]\n j = path[(node+1) % N]\n \n # since we are breaking i -> j we can remove the cost of that connection\n c = cost - dist[i, j]\n\n # search over nodes k that are closer to j than i\n for k in dsort[j]:\n # can safely continue if dist[i,j] < dist[k,j] for the remaining k\n if k == i:\n node += 1\n break\n\n # break connection k -> p\n # add connection j -> p\n # add connection i -> k\n p = path[(idx[k]+1) % N]\n new_cost = c - dist[k,p] + dist[j,p] + dist[i,k]\n\n # if this swap improves the cost, implement it and move to next i\n if new_cost < cost:\n path = reverse_segment(path, idx[j], idx[k])\n idx = np.argsort(path)\n # make sure that we didn't screw up\n assert np.abs(np.sum(dist[path[ii], path[jj]]) - new_cost) < 1e-6\n cost = new_cost\n # restart from the begining of the graph\n cost_hist.append(cost)\n node = 0\n break\n\n return path, cost_hist",
"def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list",
"def bisect_tree(self, breaking_edge_style='centroid', minSize=None):\n snl = self.n_leaves\n if (breaking_edge_style != 'clade'):\n e = self.get_breaking_edge(breaking_edge_style, minSize)\n if (e is None):\n return None, None, None\n #_LOG.debug(\"breaking_edge length = %s, %s\" % (\n # e.length, breaking_edge_style))\n tree1, tree2 = self.bipartition_by_edge(e)\n else:\n tree1, tree2, e = self.bipartition_by_root()\n\n #_LOG.debug(\"Tree 1 has %s nodes, tree 2 has %s nodes\" % (\n # tree1.n_leaves, tree2.n_leaves))\n #assert snl == tree1.n_leaves + tree2.n_leaves\n return tree1, tree2, e",
"def k_nearest_neighbor(self, k, target, current_root, k_nearest_heap): # 1 step\r\n iter_list = [] # a stack to store iteration path # 1 step\r\n # step1: find the 'nearest' leaf\r\n nearest_leaf = current_root # 1 step\r\n while nearest_leaf is not None: # 2 steps: while, is not\r\n iter_list.append(nearest_leaf) # store the path # 1 step\r\n tt = nearest_leaf.point # 2 steps: nearest_leaf.point, tt = \r\n if target[nearest_leaf.axis] < nearest_leaf.point[nearest_leaf.axis]: # 6 steps: if, <, nearest_leaf.axis, nearest_leaf.point, nearest_leaf.point[],target[]\r\n if nearest_leaf.left is not None: # then go to the left child # 3 steps: if, is not, nearest_leaf.left\r\n nearest_leaf = nearest_leaf.left # 2 steps: nearest_leaf.left, nearest_leaf = \r\n else:\r\n break\r\n else:\r\n if nearest_leaf.right is not None: # else, go to the right child\r\n nearest_leaf = nearest_leaf.right\r\n else:\r\n break\r\n while nearest_leaf.left is not None or nearest_leaf.right is not None: # 6 steps: while, is not, or, is not, nearest_leaf.left, nearest_leaf.right\r\n if nearest_leaf.left is not None: # 3 steps: if, is not, nearest_leaf.left\r\n nearest_leaf = nearest_leaf.left # 2 steps: nearest_leaf.left, = \r\n iter_list.append(nearest_leaf) # 1 step\r\n if nearest_leaf.right is not None: # 3 steps: if, is not, nearest_leaf.right\r\n nearest_leaf = nearest_leaf.right # 2 steps: nearest_leaf.right, = \r\n iter_list.append(nearest_leaf) # 1 step\r\n tt = nearest_leaf.point # 2 steps: nearest_leaf.point, tt = \r\n \"\"\"\r\n step2: find the k nearest by backtracking upside\r\n Two situations to add the point into the heap k_nearest_heap\r\n A. when len(k_nearest_heap) < k\r\n B. when dis(point, target) < current_max_dis\r\n \"\"\"\r\n # k_nearest_heap = LargeHeap() # the large heap to store the current 'nearest' neighbors\r\n # the max distance is actually the distance between target and the top of the heap\r\n '''\r\n current_max_dis = self.distance(target, nearest_leaf.point[:self.n_dim])\r\n k_nearest_heap.add(nearest_leaf, current_max_dis)\r\n tmp = iter_list.pop()\r\n '''\r\n former_node = nearest_leaf # the former 'current_node', to indicate whether go through this child\r\n # 1 step\r\n while iter_list != []: # 2 steps: while, !=\r\n if k_nearest_heap.len > 0: # 3 steps: if, k_nearest_heap.len, >\r\n current_max_dis = k_nearest_heap.heaplist[0][1] # 4 steps: k_nearest_heap.heaplist, k_nearest_heap.heaplist[0], k_nearest_heap.heaplist[0][1], current_max_dis =\r\n else:\r\n current_max_dis = -1\r\n current_pointer = iter_list.pop() # 1+38 steps: 1 step - current_pointer = ; 38 steps - iter_list.pop()\r\n tt = current_pointer.point # 2 steps: current_pointer.point, tt=\r\n dis = self.distance(current_pointer.point[:self.n_dim], target) \r\n # 1+11 steps: 1 step - dis=, 11 steps - self.distance()\r\n if k_nearest_heap.len < k:\r\n k_nearest_heap.add(current_pointer, dis)\r\n elif dis < current_max_dis: # 2 steps: elif, <\r\n k_nearest_heap.pop() # 38 steps: k_nearest_heap.pop()\r\n k_nearest_heap.add(current_pointer, dis) # 30 steps: k_nearest_heap.add()\r\n # current_max_dis = self.distance(k_nearest_heap.heaplist[0][0].point[:self.n_dim], target)\r\n current_max_dis = k_nearest_heap.heaplist[0][1] # 4 steps: k_nearest_heap.heaplist, k_nearest_heap.heaplist[],k_nearest_heap.heaplist[][], current_max_dis =\r\n axis = current_pointer.axis # 2 steps: current_pointer.axis, axis = \r\n if abs(target[axis] - current_pointer.point[axis]) >= current_max_dis:\r\n # 6 steps: if, >=, target[axis], - , 
current_pointer.point[], abs()\r\n former_node = current_pointer # 1 step\r\n # if not intersect with\r\n continue # 1 step\r\n if current_pointer.left is not None and current_pointer.left != former_node:\r\n # 5 steps: if, is not, and, current_pointer.left, !=\r\n tt = current_pointer.left # 2 steps: current_pointer.left, tt =\r\n # iter_list.append(current_pointer.left)\r\n self.k_nearest_neighbor(k, target, current_pointer.left, k_nearest_heap)\r\n # T(n/2) steps: self.k_nearest_neighbor()\r\n if current_pointer.right is not None and current_pointer.right != former_node:\r\n # 5 steps: if, is not, and, current_pointer.left, !=\r\n tt = current_pointer.right # 2 steps: current_pointer.left, tt =\r\n # iter_list.append(current_pointer.righat)\r\n self.k_nearest_neighbor(k, target, current_pointer.right, k_nearest_heap)\r\n # T(n/2) steps: self.k_nearest_neighbor()\r\n former_node = current_pointer # 1 step\r\n rlist = [] # 1 step\r\n rdis = [] # 1 step\r\n for ele in k_nearest_heap.heaplist: # 2 steps: for, in \r\n rlist.append(ele[0].point) # 3 steps: append(), ele[0], ele[0].point\r\n rdis.append(ele[1]) # 2 steps: append(), ele[1]\r\n return rdis, rlist # 1 step\r"
]
| [
"0.6906452",
"0.60709584",
"0.588671",
"0.5770251",
"0.57167107",
"0.56957114",
"0.56831235",
"0.5621595",
"0.5570404",
"0.5544286",
"0.5538527",
"0.5525354",
"0.55162233",
"0.55161303",
"0.5510437",
"0.5503939",
"0.5503517",
"0.5502011",
"0.5501439",
"0.54870886",
"0.5455407",
"0.5440854",
"0.543194",
"0.5413029",
"0.53908545",
"0.53835356",
"0.53778833",
"0.5374442",
"0.5361587",
"0.5360982"
]
| 0.67792785 | 1 |
Recursively sort list or dict nested lists | def recursive_sort(obj):
if isinstance(obj, dict):
        for key, val in obj.items():  # .iteritems() is Python 2 only; .items() works in Python 3
obj[key] = recursive_sort(val)
_sorted = obj
elif isinstance(obj, list):
new_list = []
for val in obj:
new_list.append(recursive_sort(val))
_sorted = sorted(new_list)
else:
_sorted = obj
return _sorted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sort_nodes(cls: Type, lst: List[Dict[str, Any]],\n by: str = 'item_title'):\n assert type(lst) == list\n lst.sort(key=lambda n: n[by])\n for n in lst:\n if 'nodes' in n:\n cls._sort_nodes(n['nodes'], by)",
"def sort_tree(data_list, sort_key_path):\n result = {}\n for elem in data_list:\n temp_element = copy.deepcopy(elem)\n sort_name = get_sub_value(temp_element, sort_key_path)\n if sort_name not in result:\n result.update({sort_name: {}})\n\n while temp_element:\n val, keys = _remove_deepest(temp_element)\n if keys:\n _add_sub_value(result[sort_name], keys, val)\n\n return result",
"def sort_lists(obj):\n if type(obj) == list:\n return sorted(sort_lists(x) for x in obj)\n elif type(obj) == dict:\n return dict((k, sort_lists(v)) for (k,v) in obj.items())\n else:\n return obj",
"def tree_sort(l: List(int)) -> List(int):\n\n tree = Tree()\n [tree.add_child(n) for n in l]\n return tree.collate()",
"def recursive_sort(list_to_sort, key=0):\n length = len(list_to_sort)\n if length <= 1:\n return list_to_sort\n swaplist = list_to_sort.copy()\n for i in range(0, length - 1):\n if swaplist[i][key] > swaplist[i + 1][key]:\n (swaplist[i], swaplist[i + 1]) = \\\n (swaplist[i + 1], swaplist[i])\n return recursive_sort(swaplist[0:length - 1], key) \\\n + swaplist[length - 1:length]",
"def _sort_tree(self):\n \n self._children = sorted(self._children, key=lambda x : x.id_num)\n for c in self.children:\n if hasattr(c, '_sort_tree'):\n c._sort_tree()\n \n return",
"def make_order(self, root):\n order = []\n if root and isinstance(root[0], dict):\n keys = set()\n for item in root:\n for key in item.keys():\n keys.add(key)\n for key in args.order or []:\n key = self.get_key(key, keys)\n keys.remove(key)\n order.append(key)\n order += sorted(list(keys))\n return order",
"def merge_sort(data):\n # End recursion if only a single element is present\n if len(data) < 2:\n return data\n else:\n # Split the list into two halves\n left, right = split(data)\n # Split until a single element is present\n # Build up the sorted list from there\n return merge(merge_sort(left), merge_sort(right))",
"def sortloclist(orglst: typing.List[dict]) -> typing.List[dict]:\n root = LocNode('')\n for dct in orglst:\n root.addtree(dct)\n rlst: typing.List[dict] = []\n root.dfslst(\"\", rlst)\n return rlst",
"def sort_nested_dict(d):\n for x in d:\n for y in d[x]:\n d[x][y] = sorted(d[x][y].items(), key=lambda z:z[0])\n return d",
"def _topological_sort(self):\n\n visited = defaultdict(bool)\n stack = []\n\n for pod in self.pods:\n if not visited[pod]:\n self._topological_sort_pod(pod, visited, stack)\n\n return stack[::-1]",
"def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))",
"def toposorted(infos):\n key_to_info = {}\n depends = {}\n for info in infos:\n key_to_info[info.key] = info\n depends[info.key] = []\n for info in infos:\n for after in info.after:\n after_info = key_to_info[after]\n depends[info.key].append(after_info)\n for before in info.before:\n before_info = key_to_info[before]\n depends[before_info.key].append(info)\n return topological_sort(infos, lambda info: depends[info.key])",
"def sort_nested_dict(sort_me, item_key, start_return=0, count=0, lambda_on_value=lambda x: x, descending=False):\n counter = 0\n ret = list()\n for key in sorted(sort_me.keys(), key=lambda x: lambda_on_value(sort_me[x][item_key]), reverse=descending):\n counter += 1\n if start_return and counter < start_return:\n continue\n if count and count < counter:\n break\n ret.append((key, sort_me[key]))\n return ret",
"def sort(self):\n def siftDown(start, count):\n \"\"\"\n This method tries to swap down the children's of the branch\n given by index 'start', making the lowest.\n \"\"\"\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return\n\n start = self.size / 2 - 1\n end = self.size - 1\n\n # Is this really necessary? If the structure is already ordered by \"heap-way\"...\n while start >= 0:\n # This is necessary to verify that we end-up with a correct min-heap structure,\n # because we can sort the structure at any time and end up with a max-heap.\n siftDown(start, self.size)\n start -= 1\n \n while end > 0:\n # With a 'min-heap' structure, it only takes swapping the first and the\n # \"last\" element in the heap to order it, and then reorder the heap\n # from the beginning to the \"end\"\n self.data[end], self.data[0] = self.data[0], self.data[end]\n siftDown(0, end)\n end -= 1",
"def all_children_sorted(self):\n\n for child in sorted(self.children):\n yield child\n for sub_child in child.all_children_sorted():\n yield sub_child",
"def sort(self):\n self.chain_list.sort()\n for chain in self.chain_list:\n chain.sort()",
"def sort_1(l):\n pass",
"def sort_subgroups(new_document_list):\n for page in new_document_list:\n if page[0]:\n page[0][0] = sorted(page[0][0], key=lambda g: g['bbox'][1])\n if page[1]:\n if page[1][0]:\n page[1][0] = sorted(page[1][0], key=lambda g: g['bbox'][1])\n if len(page[1])>1:\n if page[1][1]:\n page[1][1] = sorted(page[1][1], key=lambda g: g['bbox'][1])\n if page[2]:\n if page[2][0]:\n page[2][0] = sorted(page[2][0], key=lambda g: g['bbox'][1])\n if len(page[2])>1:\n if page[2][1]:\n page[2][1] = sorted(page[2][1], key=lambda g: g['bbox'][1])\n if len(page[2])>2:\n if page[2][2]:\n page[2][2] = sorted(page[2][2], key=lambda g: g['bbox'][1])\n return new_document_list",
"def sub_list(l):\n r = []\n\n for i in l:\n if type(i) in prims:\n r.append(i)\n elif type(i) is list:\n r.append(sub_list(i))\n elif type(i) is dict:\n r.append(sub_dict(i))\n else:\n print \"Unknown Type: {}\".format(type(i))\n r = sorted(r)\n return r",
"def toposort(data):\n\n\n # Ignore self dependencies.\n for k, v in data.items():\n v.discard(k)\n # Find all items that don't depend on anything.\n extra_items_in_deps = \\\n reduce(set.union, data.itervalues()) - set(data.iterkeys())\n # Add empty dependences where needed\n data.update({item:set() for item in extra_items_in_deps})\n while True:\n ordered = set(item for item, dep in data.iteritems() if not dep)\n if not ordered:\n break\n yield ordered\n data = {item: (dep - ordered)\n for item, dep in data.iteritems()\n if item not in ordered}\n assert not data, \\\n \"Cyclic dependencies exist among these items:\\n{}\".format(\n '\\n'.join(repr(x) for x in data.iteritems()))",
"def merge_sort(l: list) -> list:\r\n # Trap for lists with one or fewer elements.\r\n if len(l) <= 1:\r\n return l[:]\r\n # Divide the list into 2\r\n mid = len(l) // 2\r\n first = l[mid:]\r\n second = l[:mid]\r\n # Recursively sort smaller lists and merge the two resulting lists.\r\n left = merge_sort(first)\r\n right = merge_sort(second)\r\n return merge(left, right)",
"def test_hierarchy_element_sorting(self):\n hs = HierarchyScheme(None, (\"foo\", \"bar\"), \"foobar\")\n # Atom lists are used here to indicate the positions\n # that this element may have in the \"proper\" sorting.\n hs.add_hierarchy_element((\"Z\", 10), [13])\n hs.add_hierarchy_element((\"Z\", 5), [12])\n # Switch two values so the list isn't just reversed from the correct sorting\n hs.add_hierarchy_element((\"None\", \"10\"), [10])\n hs.add_hierarchy_element((\"Y\", 10), [11])\n hs.add_hierarchy_element((\"A\", 10), [8, 9])\n hs.add_hierarchy_element((\"A\", \"10\"), [8, 9])\n hs.add_hierarchy_element((\"A\", 1), [5, 6, 7])\n hs.add_hierarchy_element((\"A\", \"1\"), [5, 6, 7])\n hs.add_hierarchy_element((\"A\", \" 1\"), [5, 6, 7])\n hs.add_hierarchy_element((\"A\", \"None\"), [4])\n hs.add_hierarchy_element((\"A\", \" \"), [2, 3])\n hs.add_hierarchy_element((\"A\", \"\"), [2, 3])\n hs.add_hierarchy_element((\" \", \"10\"), [0, 1])\n hs.add_hierarchy_element((\"\", \"10\"), [0, 1])\n hs.sort_hierarchy_elements()\n\n # Compare the sorted hierarchyelements to their expected sortings\n # (assuming that the atom indices indicate valid positions for the resulting elements in the proper sorting)\n for idx, ele in enumerate(hs.hierarchy_elements):\n assert idx in ele.atom_indices",
"def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result",
"def topological_sort(X, root):\r\n neighbors = X.neighbors\r\n\r\n visited = defaultdict(lambda: False)\r\n\r\n stack = []\r\n parents = {}\r\n\r\n build_topological(root, None, neighbors, visited, stack, parents)\r\n return stack, parents",
"def _sort_dependencies(self):\n def sort_hier(node):\n if node is None:\n return None\n task = self.get_task_by_mapper(node.item)\n if node.cycles is not None:\n tasks = []\n for n in node.cycles:\n tasks.append(self.get_task_by_mapper(n.item))\n task.circular = task._sort_circular_dependencies(self, tasks)\n for child in node.children:\n t = sort_hier(child)\n if t is not None:\n task.childtasks.append(t)\n return task\n \n mappers = self._get_noninheriting_mappers()\n head = DependencySorter(self.dependencies, list(mappers)).sort(allow_all_cycles=True)\n #print \"-------------------------\"\n #print str(head)\n #print \"---------------------------\"\n task = sort_hier(head)\n return task",
"def _sort_node(self, node, parent, level):\n if node in parent.keys():\n if level > parent[node]:\n parent[node] = level\n else:\n parent[node] = level\n\n for downstream_node in node.downstream_nodes:\n self._sort_node(downstream_node, parent, level=level + 1)",
"def mergesort(lst):\n result = []\n i = 0\n while i < len(lst):\n left = lst[i] if isinstance(lst[i], list) else [lst[i]]\n i += 1\n\n right = lst[i] if isinstance(lst[i], list) else [lst[i]]\n i += 1\n\n result.append(merge(left, right))\n return result",
"def sorted(x) -> List:\n pass",
"def ordered(obj):\n if isinstance(obj, dict):\n return sorted((k, ordered(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(ordered(x) for x in obj)\n else:\n return obj"
]
| [
"0.6870522",
"0.6836625",
"0.6693552",
"0.6647584",
"0.6405208",
"0.6400667",
"0.637927",
"0.6320867",
"0.6313137",
"0.6243161",
"0.6231137",
"0.6197422",
"0.61671627",
"0.61670756",
"0.61259884",
"0.60648847",
"0.6053707",
"0.6015906",
"0.59107816",
"0.5910119",
"0.58951104",
"0.5870901",
"0.5865036",
"0.5858776",
"0.5857721",
"0.584564",
"0.584078",
"0.58302313",
"0.5829135",
"0.5825893"
]
| 0.69302994 | 0 |
Executes a pigpio socket command. | def _pigpio_command(sock, cmd, p1, p2):
if sock is not None:
sock.send(struct.pack('IIII', cmd, p1, p2, 0))
x, y, z, res = struct.unpack('IIII', sock.recv(16))
return res
else:
raise _pigpioError("*** Module not started, call pigpio.start() ***") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _pigpio_command_ext(sock, cmd, p1, p2, extents):\n if sock is not None:\n msg = struct.pack('IIII', cmd, p1, p2, 0)\n for ext in extents: msg += ext\n sock.sendall(msg)\n x, y, z, res = struct.unpack('IIII', sock.recv(16))\n return res\n else:\n raise _pigpioError(\"*** Module not started, call pigpio.start() ***\")",
"def start(host = os.getenv(\"PIGPIO_ADDR\", ''),\n port = os.getenv(\"PIGPIO_PORT\", 8888)):\n\n global _control, _notify\n global _host, _port\n\n _host = host\n _port = int(port)\n\n _control = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n _control.connect((_host, _port))\n _notify = _callback_thread()\n except socket.error:\n if _control is not None:\n _control = None\n if _host == '':\n h = \"localhost\"\n else:\n h = _host\n errStr = \"Can't connect to pigpio on \" + str(h) + \"(\" + str(_port) + \")\"\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(errStr)\n print(\"\")\n print(\"Did you start the pigpio daemon? E.g. sudo pigpiod\")\n print(\"\")\n print(\"Did you specify the correct Pi host/port in the environment\")\n print(\"variables PIGPIO_ADDR/PIGPIO_PORT?\")\n print(\"E.g. export PIGPIO_ADDR=soft, export PIGPIO_PORT=8888\")\n print(\"\")\n print(\"Did you specify the correct Pi host/port in the\")\n print(\"pigpio.start() function? E.g. pigpio.start('soft', 8888))\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n return False\n return True",
"def proc_exec(cmd):\n\n return envoy.run(cmd)",
"def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result",
"def proc_exec_async(cmd):\n\n envoy.connect(cmd)\n return None",
"def cmd_port(args):",
"def send_command(command):\n print(\"Send: >>> \"+command)\n TOPIPE.write(command + EOL)\n TOPIPE.flush()",
"def command():\n server = get_server()\n port = get_port()\n \n click.echo(f'{server.get(\"hostname\")}:{port} -> localhost:{port}')\n click.echo('CTRL+C for quit')\n bash('ssh -N -L {port}:localhost:{port} -i {ssh_key_path} {username}@{hostname}'.format(\n ssh_key_path=server.get('ssh_key_path'),\n username=server.get('username'),\n hostname=server.get('hostname'),\n port=port\n ))",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def run_command(command, sender_socket, ip, port):\n command_bytes = bytes(command, \"UTF-8\")\n sender_socket.sendto(command_bytes, (ip, port))\n has_data = True\n while has_data:\n try:\n byte_reply = sender_socket.recv(BUFFER_SIZE)\n str_reply = byte_reply.decode(\"UTF-8\")\n print(str_reply)\n if \"|-- Transfer \" in str_reply:\n handle_keylog_transfer(str_reply, sender_socket)\n except socket.timeout:\n has_data = False",
"def socket_command(self, cmd_str, verify_path=''):\n if verify_path and \\\n not os.path.exists(os.path.join(verify_path, 'server',\n 'server.py')) and \\\n not os.path.exists(os.path.join(verify_path, 'src',\n os.path.basename(os.getcwd()),\n 'server', 'server.py')):\n sys.stderr.write('This does not appear to be a templeton app.\\n')\n return errno.ENOENT\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n sock.connect(TEMPLETOND_SOCK_FILENAME)\n sock.send(cmd_str + '\\n')\n except socket.error, e:\n if e.errno == errno.ENOENT or e.errno == errno.ENOTCONN or \\\n e.errno == errno.ECONNREFUSED:\n sys.stderr.write('Could not connect to socket; is templetond running?\\n')\n return e.errno\n \n rsp = ''\n while '\\n' not in rsp:\n read = sock.recv(1024)\n if read:\n rsp += read\n else:\n break\n sock.close()\n err = 0\n errstr = ''\n rsp = rsp.rstrip('\\n')\n if rsp[:5] == 'ERROR':\n s = rsp.split(' ')\n try:\n err = int(s[1])\n except ValueError:\n err = 255\n errstr = rsp\n else:\n errstr = ' '.join(s[2:])\n elif rsp[:2] == 'OK':\n if len(rsp) > 2:\n print rsp[2:].strip()\n else:\n err = 255\n errstr = rsp\n if errstr:\n sys.stderr.write('error: %s\\n' % errstr)\n return err",
"def _execute_impl(self, commands):\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((self.host, self.port))\n for c in commands:\n conn.sendall(c)\n conn.recv(4096)\n conn.close()",
"def execute(self, devices, command_bytes):",
"def run_commands(ip_address, user, password, commandList, platform, buffer=5000):\n print \"Configuring \" + ip_address\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n remote_conn_pre.connect(ip_address, username=user, password=password)\n remote_conn = remote_conn_pre.invoke_shell()\n if platform == \"cisco\":\n remote_conn.send(\"enable\\n\")\n time.sleep(1)\n remote_conn.send(password+'\\n')\n time.sleep(1)\n commands = commandList.split('\\n')\n for com in commands:\n remote_conn.send(com+'\\n')\n time.sleep(1)\n output = remote_conn.recv(buffer)\n #print output",
"def run(command, pii=False):\n message = command if not pii else '[redacted due to pii]'\n log.debug('Executing: %(message)s', dict(message=message))\n subprocess.call(command, shell=True)",
"def do_socket_logic():\n pass",
"def port_cmd(self):\n print_debug(\"Executing PORT\")\n # PORT creates a new connection from server to client.\n sock = new_socket()\n self.port_connection(sock)\n # Get required parameters for PORT command.\n port_params, host_ip, host_port = self.parse_port_req(sock)\n print_debug(\"PARAMS: \" + port_params)\n command = \"PORT %s\\r\\n\" % port_params\n msg_rec = self.send_and_log(self.s, command)\n print_debug(msg_rec)\n return msg_rec, sock",
"def _send_execute_command(self):\n client = SBusClient(self.storlet_pipe_path)\n try:\n resp = client.execute(self.srequest.params, self.remote_fds)\n if not resp.status:\n raise StorletRuntimeException(\"Failed to send execute command\")\n\n if not resp.task_id:\n raise StorletRuntimeException(\"Missing task id\")\n else:\n self.task_id = resp.task_id\n except SBusClientException:\n raise StorletRuntimeException(\"Failed to send execute command\")",
"def cli_run(host_ip:str, linux_user:str, linux_password:str, cmd:str)->dict:\n try:\n c = Connection(linux_user + \"@\" + host_ip, connect_kwargs={'password':linux_password})\n return c.run(cmd, warn=True)\n except Exception as e:\n return {\"Error\": str(e)}",
"def run(host, tasks, procsPerTask, memFracPerTask, outputLoc, primaryIP, allocation,\n instance, localIP):\n argStr = ' '.join(sys.argv[1:])\n #argStr = ' '.join([str(i) for i in locals().values()]) # do not move this line.\n #TODO: Determine logDir value.\n logDir = \"\"\n\n #os.system(\"sleep 15\")\n os.system(\"python {0} {1} &\".format(join(dirname(__file__), \"networking.py\"), argStr))\n os.system(\"sleep 20\")\n port = 13001\n buf = 1024\n tcpsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n m_addr = (primaryIP, port)\n tcpsocket.connect(m_addr)\n register_command = \"register\\t{0}\\t{1}\".format(host, tasks)\n os.system(\"echo {0} >> {1}\".format(register_command, join(logDir, \"sent.log\")))\n tcpsocket.send(register_command)\n tcpsocket.close()",
"def cmd(command):\n pflush(\"[%s]> %s\" % (HOSTNAME, command))\n code = os.system(command)\n if code != 0:\n raise RuntimeError(\"Error executing: \" + command)",
"def main():\r\n parser = argparse.ArgumentParser(description=\"\"\"Starts SSH session with one\r\n of ARC\\'s Raspberrypis.\"\"\")\r\n\r\n parser.add_argument('usr', help='Username for the remote device.')\r\n parser.add_argument('pwd', help='Password for [email protected].')\r\n\r\n args = parser.parse_args()\r\n\r\n address = get_IP(IP_list(args.pwd), args.usr)\r\n os.system(\"ssh \" + \"pi\" + \"@\" + address)",
"def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response",
"def run_protocol(self, device, command, *argv, **kwarg):\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n ############# Implement me ################\n if command == \"start_protocols\":\n device.applog.info(\"Starting All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StartAllProtocols(Arg1=\"sync\")\n time.sleep(15)\n for ep in IxnetworkIxiaClientImpl.ip_eps:\n device.applog.info(\"Sending ARP on \" + ep.Name)\n ep.Start()\n ep.SendArp()\n time.sleep(5)\n device.applog.info(\"Generating Traffic\")\n for ti in IxnetworkIxiaClientImpl.tis:\n ti.Generate()\n device.applog.info(\"Applying Traffic\")\n IxnetworkIxiaClientImpl.ixnet.Traffic.Apply()\n elif command == \"stop_protocols\":\n device.applog.info(\"Stopping All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StopAllProtocols(Arg1=\"sync\")\n elif command == \"set_protocol\":\n params = kwarg[\"params\"]\n param = params[0]\n for ep in IxnetworkIxiaClientImpl.bgp_eps:\n if \"bgp_peer\" in param and param[\"bgp_peer\"] != ep.Name:\n continue\n enable = param[\"enable\"]\n IxnetworkIxiaClientImpl.bgp_eps\n ep.Active.Single(enable)\n IxnetworkIxiaClientImpl.ixnet.Globals.Topology.ApplyOnTheFly()\n return 0, \"\"",
"def command(s_socket):\r\n command = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(command) + 5, 4, 'little')\r\n s_socket.send('c' + bytes_value + command)\r\n\r\n print(s_socket.recv(MAX_BUFFER_LENGTH))",
"def command(mode, ip, log):\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging_config[log])\n\n # Using the default dict to get a valid format string no matter what\n phantom_socket = PhantomSocket(ip)\n phantom_socket.connect()\n click.echo('CONNECTED TO THE PHANTOM CAMERA')\n\n mode_identifier = _modes[mode]\n phantom_socket.set_mode(mode_identifier)\n click.echo('PHANTOM WILL TRANSIT INTO THE MODE \"%s\" NOW!' % mode_identifier)\n click.echo('THIS WILL CAUSE A REBOOT OF THE CAMERA, SO PLEASE HAVE PATIENCE')\n click.echo('IN CASE A CONNECTION CANNOT BE ESTABLISHED EVEN AFTER SOME TIME, HARD RESET THE CAMERA')\n click.echo('AFTER THE HARD RESET, THE MODE SHOULD BE CHANGED')\n phantom_socket.disconnect()",
"def vivitek(label: str, conn: str, command: str):\n with closing(socket(AF_INET, SOCK_STREAM)) as s:\n s.settimeout(10)\n s.connect((conn, 7000))\n commands = {\n \"poweron\": b\"power.on\",\n \"poweroff\": b\"power.off\",\n \"freeze\": b\"freeze = 1\",\n \"unfreeze\": b\"freeze = 0\",\n \"blank\": b\"blank = 1\",\n \"unblank\": b\"blank = 0\",\n }\n s.send(b\"op \" + commands[command] + b\"\\r\")\n yield\n s.recv(128)\n s.shutdown(SHUT_RDWR)\n print(f\"{label} (vivitek:{conn}): {command} done\")",
"def respond(cmd,t,p):\n\tt.write(cmd)\n\treturn wait(t,p)",
"def run(ctx, user_cmd):\n connecter = ScalingoInterface(ctx.obj)\n connecter.run(user_cmd)",
"def runConquest(self):\n os.chdir(wd)\n if self.platform == \"cluster\":\n os.system(\"mpirun -np %d -map-by node %s\" % (self.numProc, self.binPath))\n elif self.platform == \"Thomas\":\n os.system(\"gerun %s\" % (self.binPath))"
]
| [
"0.6749727",
"0.6158536",
"0.56130964",
"0.55257773",
"0.54540557",
"0.544766",
"0.5429975",
"0.53793585",
"0.53342295",
"0.5333023",
"0.5332043",
"0.5328123",
"0.53224134",
"0.5302751",
"0.5292858",
"0.528122",
"0.5278182",
"0.5271519",
"0.5267302",
"0.52533823",
"0.52503264",
"0.5241379",
"0.52285105",
"0.52224064",
"0.5168365",
"0.5152796",
"0.51478946",
"0.51285654",
"0.5126054",
"0.5125059"
]
| 0.81030446 | 0 |
Executes an extended pigpio socket command. | def _pigpio_command_ext(sock, cmd, p1, p2, extents):
if sock is not None:
msg = struct.pack('IIII', cmd, p1, p2, 0)
for ext in extents: msg += ext
sock.sendall(msg)
x, y, z, res = struct.unpack('IIII', sock.recv(16))
return res
else:
raise _pigpioError("*** Module not started, call pigpio.start() ***") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _pigpio_command(sock, cmd, p1, p2):\n if sock is not None:\n sock.send(struct.pack('IIII', cmd, p1, p2, 0))\n x, y, z, res = struct.unpack('IIII', sock.recv(16))\n return res\n else:\n raise _pigpioError(\"*** Module not started, call pigpio.start() ***\")",
"def eprt_cmd(self, proto=\"1\"):\n print_debug(\"Executing EPRT\")\n sock = new_socket()\n # Create port connection using extended info.\n self.port_connection(sock)\n net_prt = proto\n # Get required parameters for EPRT command.\n eprt_params, net_addr, tcp_port = self.parse_eprt_req(sock, net_prt)\n print_debug(\"PARAMS: \" + eprt_params)\n command = \"EPRT %s\\r\\n\" % eprt_params\n msg_rec = self.send_and_log(self.s, command)\n print_debug(msg_rec)\n return msg_rec, sock",
"def proc_exec(cmd):\n\n return envoy.run(cmd)",
"def do_socket_logic():\n pass",
"def run_protocol(self, device, command, *argv, **kwarg):\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n ############# Implement me ################\n if command == \"start_protocols\":\n device.applog.info(\"Starting All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StartAllProtocols(Arg1=\"sync\")\n time.sleep(15)\n for ep in IxnetworkIxiaClientImpl.ip_eps:\n device.applog.info(\"Sending ARP on \" + ep.Name)\n ep.Start()\n ep.SendArp()\n time.sleep(5)\n device.applog.info(\"Generating Traffic\")\n for ti in IxnetworkIxiaClientImpl.tis:\n ti.Generate()\n device.applog.info(\"Applying Traffic\")\n IxnetworkIxiaClientImpl.ixnet.Traffic.Apply()\n elif command == \"stop_protocols\":\n device.applog.info(\"Stopping All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StopAllProtocols(Arg1=\"sync\")\n elif command == \"set_protocol\":\n params = kwarg[\"params\"]\n param = params[0]\n for ep in IxnetworkIxiaClientImpl.bgp_eps:\n if \"bgp_peer\" in param and param[\"bgp_peer\"] != ep.Name:\n continue\n enable = param[\"enable\"]\n IxnetworkIxiaClientImpl.bgp_eps\n ep.Active.Single(enable)\n IxnetworkIxiaClientImpl.ixnet.Globals.Topology.ApplyOnTheFly()\n return 0, \"\"",
"def cmd_port(args):",
"def command(self, inst_data: int, buf: bytes, /) -> None:",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def execute(self, devices, command_bytes):",
"def start(host = os.getenv(\"PIGPIO_ADDR\", ''),\n port = os.getenv(\"PIGPIO_PORT\", 8888)):\n\n global _control, _notify\n global _host, _port\n\n _host = host\n _port = int(port)\n\n _control = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n _control.connect((_host, _port))\n _notify = _callback_thread()\n except socket.error:\n if _control is not None:\n _control = None\n if _host == '':\n h = \"localhost\"\n else:\n h = _host\n errStr = \"Can't connect to pigpio on \" + str(h) + \"(\" + str(_port) + \")\"\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(errStr)\n print(\"\")\n print(\"Did you start the pigpio daemon? E.g. sudo pigpiod\")\n print(\"\")\n print(\"Did you specify the correct Pi host/port in the environment\")\n print(\"variables PIGPIO_ADDR/PIGPIO_PORT?\")\n print(\"E.g. export PIGPIO_ADDR=soft, export PIGPIO_PORT=8888\")\n print(\"\")\n print(\"Did you specify the correct Pi host/port in the\")\n print(\"pigpio.start() function? E.g. pigpio.start('soft', 8888))\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n return False\n return True",
"def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result",
"def proc_exec_async(cmd):\n\n envoy.connect(cmd)\n return None",
"def send_cmd(self, cmd):\n logger.info(\"sending cmd %s to fuse\", cmd)\n with open(self.ipc + \".\" + str(threading.current_thread().ident), 'w', 0) as f:\n #with open(self.ipc, 'a+') as f:\n f.write(cmd)\n #f.flush()\n logger.info(\"writing to fuse returned\")",
"def send_command_line(self, command):\n raise NotImplementedError",
"def send_command(self, cmd):\n\n\t\tself.eyetribe._connection.request(cmd)",
"def execute(self, irc_c, msg, cmd):",
"def socket_command(self, cmd_str, verify_path=''):\n if verify_path and \\\n not os.path.exists(os.path.join(verify_path, 'server',\n 'server.py')) and \\\n not os.path.exists(os.path.join(verify_path, 'src',\n os.path.basename(os.getcwd()),\n 'server', 'server.py')):\n sys.stderr.write('This does not appear to be a templeton app.\\n')\n return errno.ENOENT\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n sock.connect(TEMPLETOND_SOCK_FILENAME)\n sock.send(cmd_str + '\\n')\n except socket.error, e:\n if e.errno == errno.ENOENT or e.errno == errno.ENOTCONN or \\\n e.errno == errno.ECONNREFUSED:\n sys.stderr.write('Could not connect to socket; is templetond running?\\n')\n return e.errno\n \n rsp = ''\n while '\\n' not in rsp:\n read = sock.recv(1024)\n if read:\n rsp += read\n else:\n break\n sock.close()\n err = 0\n errstr = ''\n rsp = rsp.rstrip('\\n')\n if rsp[:5] == 'ERROR':\n s = rsp.split(' ')\n try:\n err = int(s[1])\n except ValueError:\n err = 255\n errstr = rsp\n else:\n errstr = ' '.join(s[2:])\n elif rsp[:2] == 'OK':\n if len(rsp) > 2:\n print rsp[2:].strip()\n else:\n err = 255\n errstr = rsp\n if errstr:\n sys.stderr.write('error: %s\\n' % errstr)\n return err",
"def send_exploit(ip: str, port: int) -> None:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n sock.settimeout(5)\n sock.send(build_buf(add_shellcode()))\n try:\n print(sock.recv(1024))\n except socket.timeout:\n pass\n finally:\n sock.close()",
"def _execute_impl(self, commands):\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((self.host, self.port))\n for c in commands:\n conn.sendall(c)\n conn.recv(4096)\n conn.close()",
"def vivitek(label: str, conn: str, command: str):\n with closing(socket(AF_INET, SOCK_STREAM)) as s:\n s.settimeout(10)\n s.connect((conn, 7000))\n commands = {\n \"poweron\": b\"power.on\",\n \"poweroff\": b\"power.off\",\n \"freeze\": b\"freeze = 1\",\n \"unfreeze\": b\"freeze = 0\",\n \"blank\": b\"blank = 1\",\n \"unblank\": b\"blank = 0\",\n }\n s.send(b\"op \" + commands[command] + b\"\\r\")\n yield\n s.recv(128)\n s.shutdown(SHUT_RDWR)\n print(f\"{label} (vivitek:{conn}): {command} done\")",
"def send_command(command):\n print(\"Send: >>> \"+command)\n TOPIPE.write(command + EOL)\n TOPIPE.flush()",
"def cmd(self, command):\n self.enode.get_shell('bash').send_command(command, matches=self.scapy_prompt)\n response = self.enode.get_shell('bash').get_response()\n return response",
"def ping(cmd, *args, **argv):\n import os\n context = argv[\"context\"]\n \n def count(num):\n if str(num) == str(int(num)):\n if num < 0:\n context.write(\"%s: bad number of packets to transmit.\" % cmd)\n else:\n return num\n else:\n context.write(\"%s: can't set unicast time-to-live: Unknown host\" % cmd)\n return\n\n ping_p = {\"-c\":count}\n\n def isipaddress(ip):\n import socket\n try:\n ipa = socket.gethostbyname(ip)\n return ipa\n except:\n context.write(\"ping: unknown host %s\" % ip)\n return None\n\n def doping(cmd, sign, sign_param, ipaddress):\n if sign in ping_p.keys():\n # #\n pass\n else:\n context.write(\"connect: Unknown host\")\n return \n \n\tif ping_p[sign](sign_param):\n # #\n pass\n else:\n return \n if isipaddress(ipaddress):\n ip = isipaddress(ipaddress)\n else:\n return\n try:\n os.system(str(cmd) + \" \" + str(sign) + \" \" + str(sign_param) + \" \" + str(ip))\n except:\n context.write(\"has some errors in ping command\")\n return \n\n length = len(args)\n \n if length == 0:\n helpinfo = context.resolver.get_func_doc(getattr(context.resolver.get_module(cmd), cmd))\n helpinfo_format = helpinfo[\"format\"].rstrip()\n helpinfo_format = helpinfo_format.lstrip()\n if helpinfo_format == \"\":\n return\n context.write(helpinfo_format)\n\n elif length < 3:\n cmd_real = context.resolver.has_command(args[0], cmd)\n if cmd_real != None:\n cmds = cmd.split()\n cmd_n = \"_\".join(cmds)\n modulename = cmd_n + \"_\" + cmd_real\n module = context.resolver.get_module(modulename)\n func = getattr(module, modulename)\n func(cmd + \" \" + cmd_real, context = context)\n else:\n doping(cmd, \"-c\", 4, args[0])\n\n else:\n\tif args[1] == \"0\":\n\t os.system(str(cmd) + \" \" + str(args[2]))\n\telse:\n doping(cmd, args[0], args[1], args[2])",
"def _start_server_cmd(cls, address='localhost:44818',\n tags=(('SENSOR1', 'INT'), ('ACTUATOR1', 'INT'))):\n\n CMD = sys.executable + ' -m cpppo.server.enip '\n PRINT_STDOUT = '--no-print '\n HTTP = '--web %s:80 ' % address[0:address.find(':')]\n # print 'DEBUG: enip _start_server_cmd HTTP: ', HTTP\n ADDRESS = '--address ' + address + ' '\n TAGS = EnipProtocol._tuple_to_cpppo_tags(tags)\n\n if sys.platform.startswith('linux'):\n SHELL = '/bin/bash -c '\n LOG = '--log logs/protocols_tests_enip_server '\n else:\n raise OSError\n\n cmd = shlex.split(\n CMD +\n PRINT_STDOUT +\n LOG +\n ADDRESS +\n TAGS\n )\n print('DEBUG enip _start_server cmd: ', cmd)\n\n return cmd",
"def main() -> None:\n\n # region Init Raw-packet Base class\n base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])\n # endregion\n\n # region Parse script arguments\n parser: ArgumentParser = ArgumentParser(description=base.get_banner(__script_name__),\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('-a', '--address', type=str, help='Set address for listen (default: \"0.0.0.0\")',\n default='0.0.0.0')\n parser.add_argument('-p', '--port', type=int, help='Set port for listen (default: 80)', default=80)\n parser.add_argument('-s', '--site', type=str, help='Set site template \"google\" or \"apple\"', default='apple')\n parser.add_argument('-r', '--redirect', type=str, help='Set site domain for redirect', default='authentication.net')\n parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')\n args = parser.parse_args()\n # endregion\n\n # region Print banner\n if not args.quiet:\n base.print_banner(__script_name__)\n # endregion\n\n # region Start Phishing HTTP server\n try:\n phishing_server: PhishingServer = PhishingServer()\n phishing_server.start(address=args.address, port=args.port, site=args.site,\n redirect=args.redirect, quiet=args.quiet)\n\n except KeyboardInterrupt:\n if not args.quiet:\n base.print_info('Exit')\n exit(0)\n\n except AssertionError as Error:\n if not args.quiet:\n base.print_error(Error.args[0])\n exit(1)\n # endregion",
"def send_command(command):\n if connection_type == USE_I2C:\n cmd = \"\"\n cmd += chr( SSD1306_ADDRESS )\n cmd += chr( SELECT_CONTROL_BYTE )\n cmd += chr( command )\n i2cWrite(cmd, 10, False)\n else:\n print \"Not implemented for that connection type yet.\"",
"def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)",
"def notifyCommand(self, argv, extraData):\n if len(argv) == 0:\n # echo command ---> allways OK\n LOG(\"echo command\")\n return 0\n # decode the command\n cmd = argv[0].upper()\n retStatus = False;\n if (cmd == \"H\") or (cmd == \"HELP\"):\n retStatus = self.helpCmd(argv)\n elif (cmd == \"Q\") or (cmd == \"QUIT\"):\n retStatus = self.quitCmd(argv)\n elif (cmd == \"U\") or (cmd == \"DUMPCONFIGURATION\"):\n retStatus = self.dumpConfigurationCmd(argv)\n elif (cmd == \"L\") or (cmd == \"LISTPACKETS\"):\n retStatus = self.listPacketsCmd(argv)\n elif (cmd == \"G\") or (cmd == \"GENERATE\"):\n retStatus = self.generateCmd(argv)\n elif (cmd == \"RP\") or (cmd == \"RECORDPACKETS\"):\n retStatus = self.recordPacketsCmd(argv)\n elif (cmd == \"SR\") or (cmd == \"STOPPACKETRECORDER\"):\n retStatus = self.stopPacketRecorderCmd(argv)\n elif (cmd == \"P\") or (cmd == \"SETPACKETDATA\"):\n retStatus = self.setPacketDataCmd(argv, extraData)\n elif (cmd == \"S\") or (cmd == \"SENDPACKET\"):\n retStatus = self.sendPacketCmd(argv, extraData)\n elif (cmd == \"C1\") or (cmd == \"CONNECTCNC\"):\n retStatus = self.connectCNCcmd(argv)\n elif (cmd == \"D1\") or (cmd == \"DISCONNECTCNC\"):\n retStatus = self.disconnectCNCcmd(argv)\n elif (cmd == \"C2\") or (cmd == \"CONNECTCNC2\"):\n retStatus = self.connectCNC2cmd(argv)\n elif (cmd == \"D2\") or (cmd == \"DISCONNECTCNC2\"):\n retStatus = self.disconnectCNC2cmd(argv)\n elif (cmd == \"E1\") or (cmd == \"CONNECTEDEN\"):\n retStatus = self.connectEDENcmd(argv)\n elif (cmd == \"F1\") or (cmd == \"DISCONNECTEDEN\"):\n retStatus = self.disconnectEDENcmd(argv)\n elif (cmd == \"E2\") or (cmd == \"CONNECTEDEN2\"):\n retStatus = self.connectEDEN2cmd(argv)\n elif (cmd == \"F2\") or (cmd == \"DISCONNECTEDEN2\"):\n retStatus = self.disconnectEDEN2cmd(argv)\n elif (cmd == \"RF\") or (cmd == \"REPLAYFRAMES\"):\n retStatus = self.replayFramesCmd(argv)\n elif (cmd == \"N1\") or (cmd == \"CONNECTNCTRS1\"):\n retStatus = self.connectNCTRS1cmd(argv)\n elif (cmd == \"O1\") or (cmd == \"DISCONNECTNCTRS1\"):\n retStatus = self.disconnectNCTRS1cmd(argv)\n elif (cmd == \"N2\") or (cmd == \"CONNECTNCTRS2\"):\n retStatus = self.connectNCTRS2cmd(argv)\n elif (cmd == \"O2\") or (cmd == \"DISCONNECTNCTRS2\"):\n retStatus = self.disconnectNCTRS2cmd(argv)\n elif (cmd == \"N3\") or (cmd == \"CONNECTNCTRS3\"):\n retStatus = self.connectNCTRS3cmd(argv)\n elif (cmd == \"O3\") or (cmd == \"DISCONNECTNCTRS3\"):\n retStatus = self.disconnectNCTRS3cmd(argv)\n else:\n LOG_WARNING(\"invalid command \" + argv[0])\n return -1\n if retStatus:\n # processing successful\n return 0\n # processing error\n return -2",
"def send_command(self, cmd, shell=None, silent=False):",
"def sendCmd( self, *cmd, **kwargs ):\n kwargs.setdefault( 'printPid', False )\n if not self.execed:\n return Node.sendCmd( self, *cmd, **kwargs )\n else:\n error( '*** Error: %s has execed and cannot accept commands' %\n self.name )"
]
| [
"0.77093375",
"0.56171656",
"0.55937904",
"0.5364864",
"0.5340037",
"0.5328134",
"0.5307892",
"0.5300608",
"0.52866066",
"0.5278818",
"0.52584463",
"0.5224311",
"0.5205626",
"0.52012485",
"0.5194916",
"0.5194699",
"0.51928604",
"0.518897",
"0.51671207",
"0.5155846",
"0.51500124",
"0.5142843",
"0.5139381",
"0.5135403",
"0.51331115",
"0.51293063",
"0.5124772",
"0.51167893",
"0.51016694",
"0.50855756"
]
| 0.77301323 | 0 |
Set the gpio mode. | def set_mode(gpio, mode):
return _u2i(_pigpio_command(_control, _PI_CMD_MODES, gpio, mode)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setmode(self, mode):\n # ueberpruefe, ob der Modus gueltig ist\n if mode in [GPIO.BCM, GPIO.BOARD]:\n self.mode = mode\n print(f\"Modus auf {mode} gesetzt\")\n else:\n raise ValueError(\"An invalid mode was passed to setmode()\")",
"def setup_gpio(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self._input_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def setInput(self):\n gpio.setup(self.bcm_id, gpio.IN, pull_up_down=self.pull)\n self.mode = gpio.IN",
"def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)",
"def gpio_setup():\n GPIO.setmode(GPIO.BOARD)\n GPIO.setwarnings(False)\n for led in (RED, AMBER, GREEN):\n GPIO.setup(LED[led],GPIO.OUT)",
"def set_mode(self, port, bit, x):\n hw = self.device.peripherals[port].MODER\n mode = {'i':0,'o':1,'f':2,'a':3}.get(x, 0)\n shift = (bit & 15) << 1\n val = hw.rd()\n val &= ~(3 << shift)\n val |= mode << shift\n hw.wr(val)",
"def setup_gpio(self):\n try:\n pin = 4\n gpio = importlib.import_module('RPi.GPIO')\n gpio.setmode(gpio.BCM)\n gpio.setup(pin, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.add_event_detect(pin, gpio.FALLING, callback=self.on_snap_pressed, bouncetime=200)\n except ImportError as e:\n self._logger.exception(e)\n print('raspi gpio module not found, continuing...')",
"def set_mode(self,mode,state=True):\n\t\tprint \"SET_MODE START\"\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tif val.index(mode) is not None:\n\t\t\t\tif state:\n\t\t\t\t\tval.activate( val.index(mode) )\n\t\t\t\telse:\n\t\t\t\t\tval.deactivate( val.index(mode) )\n\t\t\"\"\"\n\t\tprint \"SET_MODE DONE -- ALSO DOING EXPERIMENTAL -- \"\n\t\t# DEBUG / EXPERIMENTAL\n\t\tif self.int_encoder is not None:\n\t\t\tif mode == 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/NOT VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode == 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/NOT VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\tprint \"DEBUG2.. done\"\n\t\t\"\"\"",
"def get_mode(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_MODEG, gpio, 0))",
"def GPIOsetup():\n GPIO.setmode(GPIO.BOARD)\n for led in (RED,YELLOW,GREEN):\n GPIO.setup(LED[led],GPIO.OUT)",
"def set(self, state):\n if self.mode == gpio.OUT:\n # Write an event to the buffer. \n self._buffer_write(state, time.time()*1000)\n\n gpio.output(self.bcm_id, state)\n self.state = state",
"def gpio_set(self, pin: str, status: Union[bool, str]) -> None:\n self.__logger.debug('Eva.gpio_set called')\n return self.__http_client.gpio_set(pin, status)",
"def setup_gpio(self):\n logger.info(\"Setting up GPIO pins\")\n gpio.setmode(gpio.BOARD)\n gpio.setup(self.pins[\"SCLK\"], gpio.OUT)\n gpio.setup(self.pins[\"SDO\"], gpio.OUT)\n gpio.setup(self.pins[\"SDI\"], gpio.IN)\n gpio.setup(self.pins[\"IO_UPDATE\"], gpio.OUT)\n gpio.setup(self.pins[\"IO_RESET\"], gpio.OUT)\n gpio.setup(self.pins[\"RAM_SWP_OVR\"], gpio.IN)\n gpio.setup(self.pins[\"EXT_PWR_DOWN\"], gpio.OUT)\n gpio.setup(self.pins[\"MASTER_RESET\"], gpio.OUT)\n gpio.setup(self.pins[\"PLL_LOCK\"], gpio.IN)\n gpio.setup(self.pins[\"P_0\"], gpio.OUT)\n gpio.setup(self.pins[\"P_1\"], gpio.OUT)\n gpio.setup(self.pins[\"P_2\"], gpio.OUT)",
"def setOutput(self):\n self.stopListening()\n\n gpio.setup(self.bcm_id, gpio.OUT)\n self.mode = gpio.OUT",
"def setMode(self, mode):\n if mode == 0 or mode == 1:\n with self.lock:\n self.mode = mode\n else:\n raise FliError(\"FLISetCameraMode failed\")",
"def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)",
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)",
"def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light",
"def gpio(self) -> int:",
"def set_mode(self, mode):\n self.write(\":FUNC {}\".format(mode))",
"def set_mode(self, mode, port):\n port = int(port)\n self._validate_port(\"set_mode\", port)\n self._validate_mode(mode)\n logger.debug(\"{} setting power mode to {} for usb port {}\".format(\n self._device_name, mode, port))\n self._shell_fn(self._command_dict[\"SET_MODE\"].format(mode, port))",
"def ON(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.PIN, GPIO.OUT)\n GPIO.output(self.PIN, True)\n self.STATUS = \"ON\"",
"def set_tgpio_digital(self, ionum, value, delay_sec=None):\r\n return self._arm.set_tgpio_digital(ionum=ionum, value=value, delay_sec=delay_sec)",
"def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)",
"def mode(self, mode):\n self.set_mode(mode)",
"def modeHack(self, pin, mode, board=0):\n msg = [int(pin), int(mode)]\n return self.callModule('hackp', board, 0, 'setMode', msg)",
"def gpio_status(self, mode=None):\n if mode == True:\n print('gtd: status(True), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn on the GPIO for Heat and Fan\n self.gpio_state = True\n elif mode == False:\n print('gtd: status(False), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn off the GPIO for Heat and Fan\n self.gpio_state = False\n \n # In any case, and especially if the incoming parameter MODE is None,\n # the return value of GPIO_STATUS is the state of the GPIO pins.\n\n return self.gpio_state",
"def set_pin_mode(self, pin_number, mode):\n command = (''.join(('M',mode,str(pin_number)))).encode()\n #print 'set_pin_mode =',command,(''.join(('M',mode,str(pin_number))))\n self.conn.write(command)",
"def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]"
]
| [
"0.73701143",
"0.6988686",
"0.6701568",
"0.6627662",
"0.66036546",
"0.6546484",
"0.6519655",
"0.64256644",
"0.63737124",
"0.6357623",
"0.63544023",
"0.6337343",
"0.6337022",
"0.63302016",
"0.63285375",
"0.6321674",
"0.62827396",
"0.62731683",
"0.62613213",
"0.62119853",
"0.6192309",
"0.6183255",
"0.6178886",
"0.614943",
"0.6131791",
"0.6116162",
"0.6099386",
"0.60879433",
"0.6087353",
"0.6020895"
]
| 0.81758034 | 0 |
Get the gpio mode. Returns the gpio mode if OK, otherwise PI_BAD_GPIO. | def get_mode(gpio):
return _u2i(_pigpio_command(_control, _PI_CMD_MODEG, gpio, 0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gpio_status(self, mode=None):\n if mode == True:\n print('gtd: status(True), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn on the GPIO for Heat and Fan\n self.gpio_state = True\n elif mode == False:\n print('gtd: status(False), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn off the GPIO for Heat and Fan\n self.gpio_state = False\n \n # In any case, and especially if the incoming parameter MODE is None,\n # the return value of GPIO_STATUS is the state of the GPIO pins.\n\n return self.gpio_state",
"def set_mode(gpio, mode):\n return _u2i(_pigpio_command(_control, _PI_CMD_MODES, gpio, mode))",
"def gpio(self) -> Gpio:\n\n return self._gpio",
"def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode",
"def get(self):\n if self.mode == gpio.IN:\n self.state = gpio.input(self.bcm_id)\n\n return self.state",
"def get_mode(self):\r\n return self._api.get_mode()",
"def gpio_function(self, channel):\n self._check_mode()\n return self.channels.get(channel)",
"def get_tgpio_digital(self, ionum=None):\r\n return self._arm.get_tgpio_digital(ionum)",
"def gpio(self) -> int:",
"def get_mode(self):\n self.read(\":FUNC?\")",
"def get_tgpio_version(self):\r\n return self._arm.get_tgpio_version()",
"def get_pir_mode(self):\n return self.parent._usa_pir",
"def get_mode(self):\r\n return self.mode",
"def mode(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"mode\")",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def getmode(self):\n return self.mode",
"def get_cgpio_digital(self, ionum=None):\r\n return self._arm.get_cgpio_digital(ionum=ionum)",
"def mode(self) -> Optional[pulumi.Input[Union[str, 'Mode']]]:\n return pulumi.get(self, \"mode\")",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def getFunction(self):\n return gpio.gpio_function(self.bcm_id)",
"def get_platform():\n try:\n import RPi.GPIO\n return PI\n except ImportError:\n pass\n\n if platform.system() == 'Linux':\n return LINUX\n else:\n return UNKNOWN",
"def gpio_get(self, pin: str, pin_type: str) -> Union[bool, float]:\n self.__logger.debug('Eva.gpio_get called')\n return self.__http_client.gpio_get(pin, pin_type)",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def gpio_reset_config(self):\r\n return self._arm.gpio_reset_config",
"def mode(self) -> pulumi.Input[Union[str, 'IstioIngressGatewayMode']]:\n return pulumi.get(self, \"mode\")",
"def getGatingMode(self, channel, unitCode=0):\n resp = self.XAPCommand('GMODE', channel, unitCode=unitCode)\n return int(resp)",
"def getMode(self):\n return self._mode",
"def drmode(self):\n data = self._ftdi.spi_read(self.DRMODE_ADDR, len=1, burst='fixed')\n return data[0] & self.DRMODE_MASK",
"def mode(self):\n return self._data.get('mode', None)"
]
| [
"0.68460256",
"0.651641",
"0.64104354",
"0.6068761",
"0.60609215",
"0.5901669",
"0.5876019",
"0.5844004",
"0.582057",
"0.57855934",
"0.5780658",
"0.577863",
"0.5758183",
"0.57478285",
"0.5708945",
"0.5695887",
"0.5661123",
"0.5659766",
"0.5653747",
"0.5627731",
"0.56262314",
"0.5607183",
"0.560458",
"0.560458",
"0.56039315",
"0.55939895",
"0.55513245",
"0.55502635",
"0.5540604",
"0.55157083"
]
| 0.8368854 | 0 |
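A minimal usage sketch for the get_mode query above, assuming the object-based pigpio client (pigpio.pi() talking to a running pigpiod daemon) rather than the older module-level form shown in the document column; GPIO 17 is an arbitrary example pin (Broadcom numbering).

    import pigpio

    pi = pigpio.pi()                      # connect to the local pigpiod daemon
    if not pi.connected:
        raise SystemExit("pigpiod is not running")

    pi.set_mode(17, pigpio.OUTPUT)        # configure GPIO 17 as an output
    mode = pi.get_mode(17)                # read the mode back
    print("GPIO 17 is an output:", mode == pigpio.OUTPUT)
    pi.stop()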
Set or clear the gpio pull-up/down resistor. Returns 0 if OK, otherwise PI_BAD_GPIO, PI_BAD_PUD, or PI_NOT_PERMITTED. | def set_pull_up_down(gpio, pud):
return _u2i(_pigpio_command(_control, _PI_CMD_PUD, gpio, pud)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gpio_set_input_pullup(self, pin: int) -> None:\n self._pins[pin - 1] = \"INPUT_PULLUP\"",
"def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return",
"def setup_gpio(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self._input_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)",
"def setup_gpio(self):\n try:\n pin = 4\n gpio = importlib.import_module('RPi.GPIO')\n gpio.setmode(gpio.BCM)\n gpio.setup(pin, gpio.IN, pull_up_down=gpio.PUD_UP)\n gpio.add_event_detect(pin, gpio.FALLING, callback=self.on_snap_pressed, bouncetime=200)\n except ImportError as e:\n self._logger.exception(e)\n print('raspi gpio module not found, continuing...')",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)",
"def initialize_heating_pin(pi, pin):\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)",
"def pin_pullup(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLUP)",
"def pin_pullclear(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLNONE)",
"def reset(self):\n self.wait_until_idle()\n self.__interface.write_pin(self.__interface.RST_PIN, RPi.GPIO.LOW)\n time.sleep(self.__interface.WT_PIN_TOGGLE)\n self.__interface.write_pin(self.__interface.RST_PIN, RPi.GPIO.HIGH)\n time.sleep(self.__interface.WT_PIN_TOGGLE)\n\n self.__sleeping = False",
"def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()",
"def gpio_setup():\n GPIO.setmode(GPIO.BOARD)\n GPIO.setwarnings(False)\n for led in (RED, AMBER, GREEN):\n GPIO.setup(LED[led],GPIO.OUT)",
"def config_tgpio_reset_when_stop(self, on_off):\r\n return self._arm.config_io_reset_when_stop(1, on_off)",
"def pin_pulldown(self, pin):\n port_num = self._convert_pin_port(pin)\n gpio.pullup(port_num, gpio.PULLDOWN)",
"def config_cgpio_reset_when_stop(self, on_off):\r\n return self._arm.config_io_reset_when_stop(0, on_off)",
"def set_pupd(self, port, bit, x):\n hw = self.device.peripherals[port].PUPDR\n mode = {'pu':1,'pd':2}.get(x, 0)\n shift = (bit & 15) << 1\n val = hw.rd()\n val &= ~(3 << shift)\n val |= mode << shift\n hw.wr(val)",
"def gpio(self) -> int:",
"def test_write(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.output') as mock_output:\n value = gpio.write(0, False)\n mock_output.called_once_with(0, False)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()",
"def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])",
"def setup_gpio(self):\n logger.info(\"Setting up GPIO pins\")\n gpio.setmode(gpio.BOARD)\n gpio.setup(self.pins[\"SCLK\"], gpio.OUT)\n gpio.setup(self.pins[\"SDO\"], gpio.OUT)\n gpio.setup(self.pins[\"SDI\"], gpio.IN)\n gpio.setup(self.pins[\"IO_UPDATE\"], gpio.OUT)\n gpio.setup(self.pins[\"IO_RESET\"], gpio.OUT)\n gpio.setup(self.pins[\"RAM_SWP_OVR\"], gpio.IN)\n gpio.setup(self.pins[\"EXT_PWR_DOWN\"], gpio.OUT)\n gpio.setup(self.pins[\"MASTER_RESET\"], gpio.OUT)\n gpio.setup(self.pins[\"PLL_LOCK\"], gpio.IN)\n gpio.setup(self.pins[\"P_0\"], gpio.OUT)\n gpio.setup(self.pins[\"P_1\"], gpio.OUT)\n gpio.setup(self.pins[\"P_2\"], gpio.OUT)",
"def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)",
"def setup(self):\n \n # Board refers to the P1 header of the Raspberry Pi board\n GPIO.setmode(GPIO.BOARD)\n\n # Set up pin as an input with a pull up resistor to 3.3V\n GPIO.setup(self.__pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)",
"async def async_reset_brightness(self):\n await self.local_meural.send_als_calibrate_off()",
"def gpio_cleanup(self):\n self.motor.gpio_cleanup()",
"def test_legacy_gpio_setup(GPIO):\n from plasma.gpio import PlasmaGPIO\n plasma = PlasmaGPIO(10, gpio_data=10, gpio_clock=11)\n plasma.show()\n\n GPIO.setmode.assert_called_once_with(GPIO.BCM)\n GPIO.setup.assert_has_calls([\n mock.call(10, GPIO.OUT),\n mock.call(11, GPIO.OUT)\n ])",
"def get_closed_state(self):\n return 1 if self.pull_up_down == GPIO.PUD_DOWN else 0",
"def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [CHURCH, CHURCH + 1, HALL, HALL + 1]:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)",
"def disable_reset_pulls_reset(self):\n self._dll.JLINKARM_ResetPullsRESET(0)\n return None",
"def GPIOsetup():\n GPIO.setmode(GPIO.BOARD)\n for led in (RED,YELLOW,GREEN):\n GPIO.setup(LED[led],GPIO.OUT)",
"def cleanup():\n # Note, we don't do this as part of finished() but this is a utility\n # you can call to cleanup the pins as part of an exception or finally.\n import RPi.GPIO as GPIO\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n\n GPIO.setup(27, GPIO.IN) # Green LED\n GPIO.setup(22, GPIO.IN) # Red LED\n GPIO.setup(7, GPIO.IN) # CS\n GPIO.setup(8, GPIO.IN) # CS\n GPIO.setup(11, GPIO.IN) # SCLK\n GPIO.setup(10, GPIO.IN) # MOSI\n GPIO.setup(9, GPIO.IN) # MISO\n GPIO.setup(25, GPIO.IN) # RESET\n\n GPIO.cleanup()"
]
| [
"0.5966621",
"0.5765362",
"0.57463443",
"0.5715893",
"0.5691199",
"0.5689781",
"0.5672907",
"0.5666626",
"0.56485367",
"0.551315",
"0.5511797",
"0.54891694",
"0.5486965",
"0.5470313",
"0.54582816",
"0.5456358",
"0.53926957",
"0.53535706",
"0.534321",
"0.5318191",
"0.52833074",
"0.5275816",
"0.525179",
"0.5227126",
"0.5215252",
"0.5164427",
"0.51588947",
"0.5156114",
"0.515414",
"0.5144983"
]
| 0.64496887 | 0 |
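A sketch of set_pull_up_down in use, assuming the pigpio.pi() daemon client and a hypothetical push button wired between GPIO 4 and ground (so the internal pull-up keeps the line high until the button is pressed).

    import pigpio

    pi = pigpio.pi()                          # assumes a running pigpiod daemon
    pi.set_mode(4, pigpio.INPUT)              # hypothetical button on GPIO 4
    pi.set_pull_up_down(4, pigpio.PUD_UP)     # enable the internal pull-up
    print("button pressed" if pi.read(4) == 0 else "button released")
    pi.set_pull_up_down(4, pigpio.PUD_OFF)    # clear the resistor when done
    pi.stop()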
Read the gpio level. Returns the gpio level if OK, otherwise PI_BAD_GPIO. | def read(gpio):
return _u2i(_pigpio_command(_control, _PI_CMD_READ, gpio, 0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mode(gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_MODEG, gpio, 0))",
"def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10",
"def gpio(self) -> int:",
"def read(pin):\n return _read_value(\"{0}/gpio{1}/value\".format(_path_prefix, pin))",
"def get_level(self, channel=None):\n return int(self.getSensorData(\"FILLING_LEVEL\", channel))",
"def read(self):\n self.pi.write(self.gpio, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.gpio, pigpio.INPUT)\n self.pi.set_watchdog(self.gpio, 200)\n time.sleep(0.2)",
"def gpio_get(self, pin: str, pin_type: str) -> Union[bool, float]:\n self.__logger.debug('Eva.gpio_get called')\n return self.__http_client.gpio_get(pin, pin_type)",
"def read_from_gpio(self):\n # if (use_static):\n # if (self.device_id in static_values.keys()):\n # return static_values[self.device_id]\n # else:\n # return 1;\n \n # if (random.random()<0.2):\n # if (random.random()<0.5):\n # self._decrease_position_index()\n # else:\n # self._increase_position_index()\n # self._position_index = (self._position_index+1)%len(FakePositionDevice._values)\n\n # self.position = FakePositionDevice._values[self._position_index]\n mu = self._value\n sig = 1\n self._value = np.random.normal(mu,sig,1)[0]\n if self._value < self._min_value:\n self._value = self._min_value\n if self._value > self._max_value:\n self._value = self._max_value\n \n return self._value\n return 4\n # return FakePositionDevice._values[self._position_index]",
"def gpio(self) -> Gpio:\n\n return self._gpio",
"def get_battery_level(self) -> int:\n\n try:\n self._serial.transmit(b'\\x51\\x00')\n response = self._get_reply(0x51, 1, 0.25)\n finally:\n self._gpio.sleep()\n\n return response[2]",
"def level(self):\n return self.__pin.pwm",
"def gpio_read_analogue(self, pin: int) -> float:\n return randint(0, 500) / 100",
"def get(self):\n if self.mode == gpio.IN:\n self.state = gpio.input(self.bcm_id)\n\n return self.state",
"def read_pin(self, pin):\n value = 0\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOA)\n value = self.__checkbit(self.__port_a_value, pin)\n else:\n pin = pin - 8\n self.__port_b_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOB)\n value = self.__checkbit(self.__port_b_value, pin)\n return value",
"def read_level(self):\n addresse = 0x48\n self.bus.write_byte(addresse,self.channel)\n value = self.bus.read_byte(addresse)\n time.sleep(1)\n volts = self.convert_volts(value,2)\n self.write_level(volts)\n alerteur = Alerteur()\n if volts < self.seuil_min:\n alerteur.add_alert(self.module_name, \"Batterie faible.\")\n else:\n alerteur.remove_alert(self.module_name)\n return volts",
"def gpio_function(self, channel):\n self._check_mode()\n return self.channels.get(channel)",
"def gpio_can_read_analogue(self, pin: int) -> bool:\n return pin > 5",
"def read_led(self, pin):\n value = 0 #Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logging.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logging.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value",
"def write(gpio, level):\n return _u2i(_pigpio_command(_control, _PI_CMD_WRITE, gpio, level))",
"def read_level(self):\n current_level = 1\n\n try:\n if self.store.exists(LEVEL_STORE):\n current_level_str = self.store.get(LEVEL_STORE)['level']\n current_level = int(current_level_str)\n except:\n print 'Exception when reading Galaxy run level from JSON file!'\n current_level = 1\n\n return current_level",
"def gpioRequest():\n waterHeight,gateStatus = readingJson.serverReadGpioJson(\"192.168.42.3\",\"gpiodata.json\")\n return waterHeight,gateStatus",
"def gpio_status(self, mode=None):\n if mode == True:\n print('gtd: status(True), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn on the GPIO for Heat and Fan\n self.gpio_state = True\n elif mode == False:\n print('gtd: status(False), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn off the GPIO for Heat and Fan\n self.gpio_state = False\n \n # In any case, and especially if the incoming parameter MODE is None,\n # the return value of GPIO_STATUS is the state of the GPIO pins.\n\n return self.gpio_state",
"def gpio_read_digital(self, pin: int) -> bool:\n return bool(getrandbits(1))",
"def test_read(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.input') as mock_input:\n mock_input.return_value = True\n value = gpio.read(0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n self.assertDictEqual(value, {\"value\": True})",
"def get_level(self) -> int:\n return self.rstate.level()",
"def get_power(self):\n #GPIO.setmode(GPIO.BOARD)\n #GPIO.setup(self.input_pin, GPIO.IN)\n return 0",
"def read(self, blocking=False):\n if blocking:\n GPIO.wait_for_edge(self.d_in, GPIO.RISING)\n return 1\n else:\n return GPIO.input(self.d_in)",
"def get_level(self, level):\n return",
"def gpio_input(door: Door):\n input_state = GPIO.input(GPIO_PIN)\n if input_state:\n door.is_closed()\n else:\n door.is_opened()",
"def read(self):\n if self.mode == UNAVAILABLE:\n raise IOError, \"Cannot read pin %s\"% self.__str__()\n return self.value"
]
| [
"0.6246546",
"0.6178588",
"0.61473036",
"0.61233044",
"0.6045374",
"0.6043032",
"0.597285",
"0.5971351",
"0.58981156",
"0.58965963",
"0.58692306",
"0.5800032",
"0.57912314",
"0.57090676",
"0.5700577",
"0.5690867",
"0.56805086",
"0.56707335",
"0.5591471",
"0.55661803",
"0.55581",
"0.55545324",
"0.550868",
"0.54791224",
"0.54487866",
"0.5414029",
"0.5369082",
"0.536461",
"0.53600353",
"0.5315416"
]
| 0.67489696 | 0 |
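A polling sketch for read, again assuming the pigpio.pi() client; GPIO 27 and the half-second interval are arbitrary choices for illustration.

    import time
    import pigpio

    pi = pigpio.pi()                      # assumes a running pigpiod daemon
    pi.set_mode(27, pigpio.INPUT)         # hypothetical sensor on GPIO 27
    for _ in range(5):
        print("level:", pi.read(27))      # returns 0 or 1
        time.sleep(0.5)
    pi.stop()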
Write the gpio level. Returns 0 if OK, otherwise PI_BAD_GPIO, PI_BAD_LEVEL, or PI_NOT_PERMITTED. | def write(gpio, level):
return _u2i(_pigpio_command(_control, _PI_CMD_WRITE, gpio, level)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_level(self, current_level):\n try:\n if isinstance(current_level, numbers.Number):\n if 1 <= current_level <= 3:\n current_level = str(current_level)\n self.store.put(LEVEL_STORE, level=current_level)\n except:\n print \"Error: cannot save game level!\"",
"def set_level(self, device_id, new_level):\n\t\treturn self.post(self.value_url % (ART_SERVER_HOST, device_id), {'value':new_level })",
"def gpio(self) -> int:",
"def test_write(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.output') as mock_output:\n value = gpio.write(0, False)\n mock_output.called_once_with(0, False)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()",
"def high(self) -> None:\n self._fd.write(SYSFS_GPIO_VALUE_HIGH)\n self._fd.seek(0)",
"def write_level(self,level):\n self.last_level = level\n self.last_level_date = datetime.now(tzlocal()).strftime(\"%Y-%m-%d %H:%M:%S\")\n self.write_config()",
"def gpio_trigger(user_gpio, pulse_len=10, level=1):\n # pigpio message format\n\n # I p1 user_gpio\n # I p2 pulse_len\n ## extension ##\n # I level\n\n extents = [struct.pack(\"I\", level)]\n\n return _u2i(_pigpio_command_ext(\n _control, _PI_CMD_TRIG, user_gpio, pulse_len, extents))",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def gpio_status(self, mode=None):\n if mode == True:\n print('gtd: status(True), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn on the GPIO for Heat and Fan\n self.gpio_state = True\n elif mode == False:\n print('gtd: status(False), was {}'.format(repr(self.gpio_state)))\n # TODO: Turn off the GPIO for Heat and Fan\n self.gpio_state = False\n \n # In any case, and especially if the incoming parameter MODE is None,\n # the return value of GPIO_STATUS is the state of the GPIO pins.\n\n return self.gpio_state",
"def write_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" on pin \" + str(attr.io_pin))\n GPIO.output(attr.io_pin, attr.value)",
"def gpio_set(self, pin: str, status: Union[bool, str]) -> None:\n self.__logger.debug('Eva.gpio_set called')\n return self.__http_client.gpio_set(pin, status)",
"def async_set_level(self, value: int) -> None:\n value = max(0, min(255, value))\n self._position = int(value * 100 / 255)\n self.async_write_ha_state()",
"def writeGPIOs(self, nodeid, mask, vals):\n r = remote_hardware_pb2.HardwareMessage()\n r.typ = remote_hardware_pb2.HardwareMessage.Type.WRITE_GPIOS\n r.gpio_mask = mask\n r.gpio_value = vals\n return self._sendHardware(nodeid, r)",
"def set_level(self, node_uuid, index, data):\n p = self.values['num'].get_data_index(index=index)\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.set_pwm(p, int(data*4096/100),0)\n self.values['level'].set_data_index(index=index, data=data)\n except Exception:\n logger.warning(\"[%s] - set_level invalid data : %s\", self.__class__.__name__, data)\n finally:\n self._bus.i2c_release()",
"def level(self):\n return self.__pin.pwm",
"def output(self, channel, value):\n self._check_mode()\n if channel in self.channels:\n print(f\"output fuer channel {channel} auf {value} gesetzt\")\n else:\n raise RuntimeError(\"The GPIO channel has not been set up as an OUTPUT\")",
"async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")",
"def write(self, value):\n if self.mode is UNAVAILABLE:\n raise IOError, \"%s can not be used through Firmata\" % self\n if self.mode is INPUT:\n raise IOError, \"%s is set up as an INPUT and can therefore not be written to\" % self\n if value is not self.value:\n self.value = value\n if self.mode is OUTPUT:\n if self.port:\n self.port.write()\n else:\n msg = chr(DIGITAL_MESSAGE)\n msg += chr(self.pin_number)\n msg += chr(value)\n self.board.sp.write(msg)\n elif self.mode is PWM:\n value = int(round(value * 255))\n msg = chr(ANALOG_MESSAGE + self.pin_number)\n# print(value)\n msg += chr(value % 128)\n msg += chr(value >> 7)\n self.board.sp.write(msg)\n elif self.mode is SERVO:\n value = int(value)\n msg = chr(ANALOG_MESSAGE + self.pin_number)\n msg += chr(value % 128)\n msg += chr(value >> 7)\n self.board.sp.write(msg)",
"def loglevel(level):\n # There's no way to getting the previous loglevel so imply we've\n # always made a change.\n ret = {\"changes\": True}\n\n myos = __grains__[\"os\"]\n if myos == \"FreeBSD\":\n all_levels = [\"none\", \"urgent\", \"misc\", \"loud\"]\n else:\n all_levels = [\n \"emerg\",\n \"alert\",\n \"crit\",\n \"err\",\n \"warning\",\n \"notice\",\n \"info\",\n \"debug\",\n ]\n if level not in all_levels:\n raise SaltInvocationError(\"Unknown loglevel: {}\".format(level))\n\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -x {}\".format(level), output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] != 0:\n raise CommandExecutionError(\n \"Problem encountered setting loglevel\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret",
"def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)",
"def expanderWrite( self, _data ): # uint8_t\n\t\t#Wire.beginTransmission(_Addr);\n\t\t#printIIC((int)(_data) | _backlightval) # print II\n\t\tself.i2c.writeto( self.address, bytes( [_data | self._backlightval] ))\n\t\t#Wire.endTransmission();",
"def set_brightness(self, level):\n print(\"Got request to set brightness with level: %s\" % (level))\n # Home assistant sets brightness on a scale of 0 to 255\n if level > 0 and level < 255:\n new_level = level / 255\n print(\"Setting brightness to %s\" % (new_level))\n self.turn_on(r=self.r, g=self.g, b=self.b, brightness=new_level)\n self.client.publish(BRIGHTNESS_STATE_TOPIC, level) #publish",
"def set_battery_level(self, battery_level: int) -> None:\n\n if battery_level < 0 or battery_level > 255:\n raise ValueError('Bad battery level')\n\n cmd = b'\\x50\\x01' + bytes([battery_level])\n try:\n self._serial.transmit(cmd)\n self._get_reply(0x50, 0, 0.25)\n finally:\n self._gpio.sleep()\n\n return",
"def write_pin(self, pin: int, value: bool):\n RPi.GPIO.output(pin, value)",
"def get_level(self, channel=None):\n return int(self.getSensorData(\"FILLING_LEVEL\", channel))",
"def _setGPIOs(self, Dpin, direction, value):\n\n try:\n\n with open('/sys/class/gpio/export', 'a') as f_export:\n f_export.write(self.MAPPING[Dpin])\n\n with open('/sys/class/gpio/' + Dpin + '/direction', 'a') as f_dir:\n f_dir.write(direction)\n\n with open('/sys/class/gpio/' + Dpin + '/value', 'a') as f_value:\n f_value.write(value)\n\n with open('/sys/class/gpio/' + Dpin + '/value') as f_value:\n result = \"PIN \" + Dpin + \" value \" + f_value.read()\n\n except Exception as err:\n LOG.error(\"Error setting GPIO value: \" + str(err))\n result = None\n\n return result",
"def get_power(self):\n #GPIO.setmode(GPIO.BOARD)\n #GPIO.setup(self.input_pin, GPIO.IN)\n return 0",
"def logging_write(e=None):\n try:\n if e is not None: # Exception errors\n logging.error(e)\n\n if LIMIT_FLAG == 3:\n print('Moisture Level: Low (Dry)')\n logging.info('| Moisture Level Low (Dry)')\n red_led.on()\n green_led.off()\n blue_led.off()\n elif LIMIT_FLAG == 2:\n print('Moisture Level: Normal')\n logging.info('| Moisture Level: Normal')\n red_led.off()\n green_led.on()\n blue_led.off()\n else:\n print('Moisture Level: High')\n logging.warning('| Moisture Level: High')\n red_led.off()\n green_led.off()\n blue_led.on()\n except Exception as e:\n logging_write(e)",
"def set_high(pin):\n _write_value(HIGH, \"{0}/gpio{1}/value\".format(_path_prefix, pin))",
"def save_level(self):\n if self.project is not None:\n self.project.save_level()"
]
| [
"0.60646546",
"0.55102795",
"0.54987025",
"0.54412705",
"0.54124314",
"0.5370314",
"0.53354824",
"0.5312178",
"0.5277599",
"0.5264569",
"0.52260953",
"0.522468",
"0.5204833",
"0.51930666",
"0.516427",
"0.5160129",
"0.51343644",
"0.50884634",
"0.50754654",
"0.5062609",
"0.5013584",
"0.500788",
"0.50053674",
"0.50039524",
"0.5002449",
"0.49612695",
"0.49601215",
"0.4958385",
"0.49489155",
"0.4929537"
]
| 0.7548673 | 0 |
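A sketch of write driving a pin, assuming the pigpio.pi() client and a hypothetical LED on GPIO 22.

    import time
    import pigpio

    pi = pigpio.pi()                      # assumes a running pigpiod daemon
    pi.set_mode(22, pigpio.OUTPUT)        # hypothetical LED on GPIO 22
    pi.write(22, 1)                       # drive the pin high
    time.sleep(1)
    pi.write(22, 0)                       # drive the pin low
    pi.stop()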
Start (nonzero dutycycle) or stop (0) PWM pulses on the gpio. Returns 0 if OK, otherwise PI_BAD_USER_GPIO, PI_BAD_DUTYCYCLE, or PI_NOT_PERMITTED. | def set_PWM_dutycycle(user_gpio, dutycycle):
return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_PWM_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRG, user_gpio, 0))",
"def __init__(self, servo_gpio, pi=None, pulse_left_ns=2500, pulse_right_ns=1000, pulse_centre_ns=None):\n\n self.gpio = servo_gpio\n\n if pi is None:\n self.pi = pi = pigpio.pi()\n else:\n self.pi = pi\n\n self.pulse_left_ns = pulse_left_ns\n self.pulse_right_ns = pulse_right_ns\n\n if pulse_centre_ns is None:\n self.pulse_centre_ns = ((pulse_left_ns - pulse_right_ns) // 2) + pulse_right_ns",
"def get_PWM_frequency(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFG, user_gpio, 0))",
"def get_PWM_real_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRRG, user_gpio, 0))",
"def main():\n\n # Center positions when joystick is at rest\n center_x_pos = 530\n center_y_pos = 504\n\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup([red_led, green_led, blue_led], GPIO.OUT, initial=GPIO.LOW)\n\n pwm_r = GPIO.PWM(red_led, 300)\n pwm_g = GPIO.PWM(green_led, 300)\n pwm_b = GPIO.PWM(blue_led, 300)\n\n pwm_instances = [pwm_r, pwm_g, pwm_b]\n\n for p in pwm_instances:\n p.start(0)\n\n try:\n while True:\n # If joystick switch is pressed down, turn off LEDs\n switch = read_spi_data_channel(mcp3008_switch_channel)\n if switch == 0:\n for p in pwm_instances:\n p.ChangeDutyCycle(0)\n continue\n\n # Read the joystick position data\n x_pos = read_spi_data_channel(mcp3008_x_voltage_channel)\n y_pos = read_spi_data_channel(mcp3008_y_voltage_channel)\n\n # If joystick is at rest in center, turn on all LEDs at max\n if is_joystick_near_center(x_pos, y_pos, center_x_pos, center_y_pos):\n for p in pwm_instances:\n p.ChangeDutyCycle(100)\n continue\n\n # Adjust duty cycle of LEDs based on joystick position\n angle = convert_coordinates_to_angle(x_pos, y_pos, center_x_pos, center_y_pos)\n pwm_r.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'R'))\n pwm_g.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'G'))\n pwm_b.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'B'))\n\n # print(\"Position : ({},{}) -- Angle : {}\".format(x_pos, y_pos, round(angle, 2)))\n\n except KeyboardInterrupt:\n pass\n\n finally:\n for p in pwm_instances:\n p.stop()\n spi.close()\n GPIO.cleanup()",
"def set_PWM_range(user_gpio, range_):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRS, user_gpio, range_))",
"def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])",
"def duty_cycle(self):\n pwm = self._pca.pwm_regs[self._index]\n if pwm[0] == 0x1000:\n return 0xffff\n return pwm[1] << 4",
"def goal_pwm(self):\n return self._read(MX_GOAL_PWM)",
"def set_PWM_frequency(user_gpio, frequency):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFS, user_gpio, frequency))",
"def set_pwm(self, state):\n if state:\n if self.pwm:\n return self.get_status()\n self.pwm = GPIO.PWM(self.id_, 50)\n if self.state == GPIO.HIGH:\n self.pwm.start(100)\n else:\n self.pwm.start(0)\n else:\n if not self.pwm:\n return self.get_status()\n self.pwm.stop()\n self.pwm = False\n return self.get_status()",
"def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)",
"def goal_pwm(self, value):\n self._write(MX_GOAL_PWM, value)",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def duty(self):\n if self._chanRMT < 0:\n return self._pin.duty()\n else:\n return self._dutyRMT",
"def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.",
"def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)",
"def led_duty_cycle(val):\n set_tmr_ocr(TMR1, OCRxB, val)",
"def _pin_pulse(pin, initial_state=GPIO.LOW, pulse_width=PULSE_WIDTH_SEC):\n # type: (int, bool, Union[int, float]) -> None\n GPIO.output(pin, not initial_state)\n try:\n time.sleep(pulse_width)\n finally:\n GPIO.output(pin, initial_state)",
"def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(pct_cmd))",
"def pwm(self, index, on=None, off=None):\n raise NotImplementedError()",
"def pwm(self):\n return self._pwm",
"def pi_close():\n print(\"\\nClosing lock :(\")\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(11, GPIO.OUT)\n servo1 = GPIO.PWM(11, 50)\n\n # start(0) pulse off\n print('servo.start(0)')\n servo1.start(0)\n time.sleep(1)\n\n # turns a little at a time using servo\n print('turning...')\n i = 8.5\n while i > 2:\n # pulse next degree \n print('ChangeDutyCycle(%d)' % i)\n servo1.ChangeDutyCycle(i)\n time.sleep(0.2)\n # no pulse, for smoother turn\n servo1.ChangeDutyCycle(0)\n time.sleep(0.1)\n i -= 1\n\n # stop pulse\n print('servo.ChangeDutyCycle(0)')\n servo1.ChangeDutyCycle(0)\n servo1.stop()\n GPIO.cleanup()\n print('done closing')",
"def pwm_loop(self, off_sleep = 0.1, Hz = 500, on_sleep = 0.01, signal_t = 300, dc = 50):\n if hasattr(self.p, 'start') == False:\n self.p = GPIO.PWM(self.LED, Hz)\n else:\n self.p.ChangeFrequency(Hz)\n self.p.start(0)\n t = timeit.default_timer()\n nt = t\n self.cycle_t = on_sleep + off_sleep\n while nt - t < signal_t:\n time.sleep(off_sleep)\n self.p.ChangeDutyCycle(dc)\n time.sleep(on_sleep)\n self.p.ChangeDutyCycle(0)\n nt = timeit.default_timer()\n\n self.p.stop()",
"def read_led(self, pin):\n value = 0 #Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logging.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logging.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value",
"def gpio(self) -> int:",
"def get_fan_pwm(self, pwm_val=None):\n self.assertNotEqual(pwm_val, None, \"Expected PWM value needs to be set\")\n\n data = run_shell_cmd(\"/usr/local/bin/get_fan_speed.sh\")\n data = data.split(\"\\n\")\n for line in data:\n if len(line) == 0:\n continue\n line = line.split(\"(\")\n line = line[1].split(\"%\")\n if abs(int(line[0]) - int(pwm_val)) < 2:\n continue\n else:\n return [False, data]\n return [True, None]",
"def present_pwm(self):\n return self._read(MX_PRESENT_PWM)",
"def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = (val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")",
"def set_pwm(self, duty_cycle):\n PWM.set_duty_cycle(self.pwm_pin, duty_cycle)"
]
| [
"0.59510094",
"0.5841308",
"0.5794132",
"0.5698013",
"0.5638398",
"0.55871224",
"0.5514766",
"0.55016214",
"0.54185426",
"0.5373289",
"0.5357235",
"0.5293218",
"0.52856135",
"0.5219256",
"0.52131397",
"0.51952195",
"0.5191489",
"0.51635724",
"0.5160985",
"0.5146592",
"0.51293534",
"0.5122684",
"0.51046985",
"0.50929743",
"0.50728786",
"0.5034855",
"0.5033582",
"0.5004995",
"0.4998046",
"0.4954549"
]
| 0.66211385 | 0 |
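A sketch of set_PWM_dutycycle, assuming the pigpio.pi() client, the default dutycycle range of 0-255, and GPIO 18 as an example pin.

    import pigpio

    pi = pigpio.pi()                      # assumes a running pigpiod daemon
    pi.set_PWM_dutycycle(18, 64)          # roughly 25% of the default 0-255 range
    # ... do work while PWM runs ...
    pi.set_PWM_dutycycle(18, 0)           # a dutycycle of 0 stops the pulses
    pi.stop()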
Set the range of PWM values to be used on the gpio. Returns 0 if OK, otherwise PI_BAD_USER_GPIO, PI_BAD_DUTYRANGE, or PI_NOT_PERMITTED. | def set_PWM_range(user_gpio, range_):
return _u2i(_pigpio_command(_control, _PI_CMD_PRS, user_gpio, range_)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_PWM_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRG, user_gpio, 0))",
"def get_PWM_real_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRRG, user_gpio, 0))",
"def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])",
"def set_range(self, value):\n self.gauge.SetRange(value)",
"def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")",
"def setRange(self, range):\n\t\tself.range = range\n\t\tself.slider.setMinimum(0.0)\n\t\tself.slider.setMaximum(100.0)\n\t\tself.spinbox.setRange(self.range[0], self.range[1])\n\n\t\tdiff = self.range[1] - self.range[0]\n\t\tif diff <= 1:\n\t\t\tself.spinbox.setSingleStep(0.01)",
"def _pwm_percent_limits(self, limit_per: float):\r\n # Check if the input percent is an int or float\r\n try:\r\n limit_per = float(limit_per)\r\n except (ValueError, TypeError):\r\n # make new error here?\r\n raise ValueError(\"Not a valid input percent\")\r\n\r\n if limit_per > self.max_pwm:\r\n # Set to previous percent\r\n setpoint = self.percent\r\n elif limit_per < 0:\r\n # Set to 0 if negative\r\n setpoint = 0\r\n else:\r\n # Changes setpoint to be multiple of 0.5\r\n setpoint = self.percent_step * round(limit_per / self.percent_step)\r\n\r\n # FIXME: only change this for if checksum if False\r\n # Changes setpoint from 63 to 62.5% if checksum mode is disabled\r\n if setpoint in _PERCENT_TRANSFORMS:\r\n setpoint = _PERCENT_TRANSFORMS[setpoint]\r\n\r\n # TODO: make this to logging instead?\r\n print(\"Setpoint is {0}%\".format(setpoint))\r\n return setpoint",
"def setRange(self, x_range, y_range):\n pass",
"def set_output_range(self, output_index, output_range):\n\n allowed_ranges = [0.2, 0.4, 0.6, 0.8, 1, 2, 3, 4, 5]\n\n if output_index in range(self.num_outputs):\n if output_range in allowed_ranges:\n\n # Send change range command.\n self.setd(f'sigouts/{output_index}/range', output_range)\n\n # Wait for HDAWG to be ready, try 100 times before timeout.\n max_tries = 100\n num_tries = 0\n\n while self.geti(f'sigouts/{output_index}/busy') and num_tries < max_tries:\n time.sleep(0.2)\n num_tries += 1\n\n if num_tries is max_tries:\n self.log.error(\n f\"Range change timeout after {max_tries} tries.\"\n )\n else:\n self.log.info(\n f\"Changed range of wave output {output_index} to {output_range} V.\"\n )\n else:\n self.log.error(\n f\"Range {output_range} is not valid, allowed values for range are {allowed_ranges}\"\n )\n else:\n self.log.error(\n f\"This device has only {self.num_outputs} channels, channel index {output_index} is invalid.\"\n )",
"def set_progress_range(self, maximum):\r\n\r\n pass",
"def radio_range(self, value: int):\n self._radio_range = value",
"def goal_pwm(self, value):\n self._write(MX_GOAL_PWM, value)",
"def set_PWM_frequency(user_gpio, frequency):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFS, user_gpio, frequency))",
"def set_par_range(self, mins, maxs, frozen):\n self.parmins = mins\n self.parmaxs = maxs\n self.pars_frozen = frozen\n return",
"def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value",
"def set_temp_range(self, temp_range=(0, 0, 1)):\n args = list(temp_range)\n assert len(args) == 3\n minimum, maximum, step = args\n if all([isinstance(i, int) for i in args]):\n if (maximum - minimum) % step == 0:\n maximum += 1\n self.temperatures = np.arange(minimum, maximum, step, dtype=float)\n self.qptanalyzer.temperatures = self.temperatures",
"def set_servo_pulsewidth(user_gpio, pulsewidth):\n return _u2i(_pigpio_command(_control, _PI_CMD_SERVO, user_gpio, pulsewidth))",
"def set_cgpio_analog_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):\r\n return self._arm.set_cgpio_analog_with_xyz(ionum, value, xyz, fault_tolerance_radius)",
"def setRange(self, x_range, y_range):\n self._pipe.send(\"range,%f,%f,%f,%f\" % (x_range + y_range))",
"def set_slider_bounds(self,lower,upper,inclusive_bounds=None):\n self.bounds = (lower,upper)\n\n if inclusive_bounds is not None:\n self.inclusive_bounds = inclusive_bounds\n\n epsilon = max(self.slider['resolution'],0.00000000001)\n\n if self.inclusive_bounds[0] is False:\n lower+=epsilon\n if self.inclusive_bounds[1] is False:\n upper-=epsilon\n self.slider.config(from_=lower,to=upper)",
"def programMisc(self, p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity, p_rateModulation):\n if not self.validMisc(p_upperRateLim, p_lowerRateLim, p_fixedAVDelay, p_modulationSensitivity):\n return FailureCodes.INVALID_USER_INPUT\n if p_upperRateLim is not None:\n self.user.data.setUpperRateLimit(p_upperRateLim)\n if p_lowerRateLim is not None:\n self.user.data.setLowerRateLimit(p_lowerRateLim)\n if p_fixedAVDelay is not None:\n self.user.data.setFixedAVDelay(p_fixedAVDelay)\n if p_modulationSensitivity is not None:\n self.user.data.setAccelerationFactor(p_modulationSensitivity)\n self.user.data.setRateModulation(p_rateModulation)\n return FailureCodes.VALID",
"def set_pwm(self, state):\n if state:\n if self.pwm:\n return self.get_status()\n self.pwm = GPIO.PWM(self.id_, 50)\n if self.state == GPIO.HIGH:\n self.pwm.start(100)\n else:\n self.pwm.start(0)\n else:\n if not self.pwm:\n return self.get_status()\n self.pwm.stop()\n self.pwm = False\n return self.get_status()",
"def pwm_limit(self, value):\n self._write(MX_PWM_LIMIT, value)",
"def set_PWM_dutycycle(user_gpio, dutycycle):\n return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle))",
"def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None",
"def set_tgpio_digital_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):\r\n return self._arm.set_tgpio_digital_with_xyz(ionum, value, xyz, fault_tolerance_radius)",
"def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits",
"def test_returns_custom_range(self):\n img = np.arange(5)\n\n actual = util.apply_gamma(img, out_min=0, out_max=255, gamma=1)\n self.assertEqual(actual.min(), 0)\n self.assertEqual(actual.max(), 255)",
"def set_output_limits(self, min_value, max_value):\n self.out_min = min_value\n self.out_max = max_value\n if self.out_min > self.out_max:\n print(\"set_output_limits(): min must be smaller than max.\")\n self.iterm = self.clip_to_output_limits(self.iterm)\n self.output = self.clip_to_output_limits(self.output)",
"def set_cgpio_digital_with_xyz(self, ionum, value, xyz, fault_tolerance_radius):\r\n return self._arm.set_cgpio_digital_with_xyz(ionum, value, xyz, fault_tolerance_radius)"
]
| [
"0.6903804",
"0.64802706",
"0.60067755",
"0.58858705",
"0.58708185",
"0.58489203",
"0.5753922",
"0.5605951",
"0.55939317",
"0.5473511",
"0.5472374",
"0.54461926",
"0.54359305",
"0.54064345",
"0.5387591",
"0.5380408",
"0.532704",
"0.5324829",
"0.5308869",
"0.5295573",
"0.52633965",
"0.5253915",
"0.5253168",
"0.52414507",
"0.52353007",
"0.52271307",
"0.51873565",
"0.51836705",
"0.5177464",
"0.51633877"
]
| 0.7968123 | 0 |
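A sketch of set_PWM_range, assuming the pigpio.pi() client; widening the range to 100 makes subsequent dutycycle values read as percentages on the example pin.

    import pigpio

    pi = pigpio.pi()                      # assumes a running pigpiod daemon
    pi.set_PWM_range(18, 100)             # dutycycle values now map to 0-100
    pi.set_PWM_dutycycle(18, 25)          # 25% with the new range
    pi.stop()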
Get the range of PWM values being used on the gpio. Returns the dutycycle range used for the gpio if OK, otherwise PI_BAD_USER_GPIO. | def get_PWM_range(user_gpio):
return _u2i(_pigpio_command(_control, _PI_CMD_PRG, user_gpio, 0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_PWM_real_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRRG, user_gpio, 0))",
"def set_PWM_range(user_gpio, range_):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRS, user_gpio, range_))",
"def get_PWM_frequency(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFG, user_gpio, 0))",
"def get_servo_pct(pi, pin):\n return pulsewidth2pct(pi.get_servo_pulsewidth(pin))",
"def goal_pwm(self):\n return self._read(MX_GOAL_PWM)",
"def get_fan_pwm(self, pwm_val=None):\n self.assertNotEqual(pwm_val, None, \"Expected PWM value needs to be set\")\n\n data = run_shell_cmd(\"/usr/local/bin/get_fan_speed.sh\")\n data = data.split(\"\\n\")\n for line in data:\n if len(line) == 0:\n continue\n line = line.split(\"(\")\n line = line[1].split(\"%\")\n if abs(int(line[0]) - int(pwm_val)) < 2:\n continue\n else:\n return [False, data]\n return [True, None]",
"def pwm_limit(self):\n return self._read(MX_PWM_LIMIT)",
"def set_PWM_dutycycle(user_gpio, dutycycle):\n return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle))",
"def pwm(self):\n return self._pwm",
"def PWMvalue(self, v, minPWM, maxPWM):\n pwm = 0\n if math.fabs(v) > self.SPEED_TOLERANCE:\n pwm = int(math.floor(math.fabs(v) * (maxPWM - minPWM) + minPWM))\n return min(pwm, maxPWM)",
"def pwm(self):\n\n ret = []\n for _ in range(self._count_dist_fn()):\n ret.append(self._pwm.pwm())\n return numpy.hstack(ret)",
"def _get_humidity_range(self):\n\n if not self._humidity_range:\n if not self.model_info:\n return None\n\n key = self._get_state_key(STATE_TARGET_HUM)\n range_info = self.model_info.value(key)\n if not range_info:\n min_hum = DEFAULT_MIN_HUM\n max_hum = DEFAULT_MAX_HUM\n else:\n min_hum = min(range_info.min, DEFAULT_MIN_HUM)\n max_hum = max(range_info.max, DEFAULT_MAX_HUM)\n self._humidity_range = [min_hum, max_hum]\n\n return self._humidity_range",
"def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f'This car can go about {range} miles on a full charge.')",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n \n print(f\"This car can go about {range} miles on a full charge.\")",
"def _get_range_squared(self) -> np.ndarray:\n m2km = 0.001\n return (self.range * m2km) ** 2",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge.\")",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go to about {range} miles on a full charge.\")",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge\")",
"def get_range(self):\r\n\t\tif self.battery_size == 70:\r\n\t\t\trange = 240\r\n\t\telif self.battery_size == 85:\r\n\t\t\trange = 270\r\n\t\t\t\r\n\t\tmessage = \"This car can go approx. \" + str(range)\r\n\t\tmessage += \" miles on a full charge.\"\r\n\t\tprint(message)",
"def getRange(data, theta):\n\n car_theta = math.radians(theta) - math.pi / 2\n\n if car_theta > 3 * math.pi / 4:\n car_theta = 3 * math.pi / 4\n elif car_theta < -3 * math.pi / 4:\n car_theta = -3 * math.pi / 4\n\n\n float_index = (car_theta + 3 * math.pi / 4) / data.angle_increment\n index = int(float_index)\n\n\t## check the index and data (BK v0.11)\n# print \"idx= {} : data= {}\\n\".format(index, data.ranges[index])\n return data.ranges[index]",
"def get_range(self):\n if self.battery_size == 40:\n range = 150\n elif self.battery_size == 65:\n range = 225\n print(f\"This car can go about {range} miles on a full charge.\")",
"def possible_vals(pp):\n\n if pp[\"type\"] == \"w\":\n vals = [0, pp[\"pmax\"]]\n\n elif pp[\"type\"] == \"windturbine\":\n vals = [0, pp[\"pmin\"]]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"] + 1):\n vals.append(pp[\"pmin\"] + i)\n\n else: # Turbojet\n vals = [0]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"]):\n vals.append(pp[\"pmin\"] + i)\n return vals",
"def max_pwm(self):\r\n return self._max_pwm",
"def get_limits(self, device,percent=0.25):\n\t\tval = epics.caget(device)\n tol = (val*percent)\n lim_lo = val-tol\n lim_hi = val+tol\n limits = [lim_lo,lim_hi]\n\t\treturn limits",
"def get_range(self, channel):\n\n pr = self.device.query(f':PRANGE{channel}?')\n return pr",
"def i_range(Pd):\n return xrange(i_min(Pd), i_max(Pd) + 1)",
"def get_uniform_p_vals(self, min=1.5, max=9):\n rng = np.random.RandomState(12435)\n if self.power_activ == \"softplus\":\n p_vals = np.log(np.exp(rng.uniform(low=min, high=max, size=(self.num_units,))-1)-1)\n else:\n p_vals = np.sqrt(rng.uniform(low=min, high=max, size=(self.num_units,))-1)\n return p_vals",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def pwm(self):\n\n pwm_tot = []\n for x in self._motifs:\n pwm_tot.append(x.pwm())\n\n return numpy.hstack(pwm_tot)"
]
| [
"0.765557",
"0.66512173",
"0.61994433",
"0.6053772",
"0.5918388",
"0.5853133",
"0.58236945",
"0.57681507",
"0.57501",
"0.5722623",
"0.5573397",
"0.55700403",
"0.5500223",
"0.54832375",
"0.546351",
"0.5449539",
"0.54490256",
"0.54359204",
"0.5433655",
"0.5419566",
"0.54105836",
"0.5403074",
"0.53894985",
"0.5355119",
"0.53425515",
"0.5333928",
"0.53325415",
"0.53115726",
"0.52869177",
"0.5272612"
]
| 0.8118887 | 0 |
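A sketch of get_PWM_range, assuming the pigpio.pi() client; it reads the default range and then the value after it has been changed.

    import pigpio

    pi = pigpio.pi()                      # assumes a running pigpiod daemon
    print(pi.get_PWM_range(18))           # 255 unless previously changed
    pi.set_PWM_range(18, 1000)
    print(pi.get_PWM_range(18))           # now 1000
    pi.stop()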
Get the real underlying range of PWM values being used on the gpio. Returns the real range used for the gpio if OK, otherwise PI_BAD_USER_GPIO. | def get_PWM_real_range(user_gpio):
return _u2i(_pigpio_command(_control, _PI_CMD_PRRG, user_gpio, 0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_PWM_range(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRG, user_gpio, 0))",
"def set_PWM_range(user_gpio, range_):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRS, user_gpio, range_))",
"def PWMvalue(self, v, minPWM, maxPWM):\n pwm = 0\n if math.fabs(v) > self.SPEED_TOLERANCE:\n pwm = int(math.floor(math.fabs(v) * (maxPWM - minPWM) + minPWM))\n return min(pwm, maxPWM)",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def get_PWM_frequency(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFG, user_gpio, 0))",
"def pwm_limit(self):\n return self._read(MX_PWM_LIMIT)",
"def pwm(self):\n return self._pwm",
"def goal_pwm(self):\n return self._read(MX_GOAL_PWM)",
"def get_fan_pwm(self, pwm_val=None):\n self.assertNotEqual(pwm_val, None, \"Expected PWM value needs to be set\")\n\n data = run_shell_cmd(\"/usr/local/bin/get_fan_speed.sh\")\n data = data.split(\"\\n\")\n for line in data:\n if len(line) == 0:\n continue\n line = line.split(\"(\")\n line = line[1].split(\"%\")\n if abs(int(line[0]) - int(pwm_val)) < 2:\n continue\n else:\n return [False, data]\n return [True, None]",
"def _get_range_squared(self) -> np.ndarray:\n m2km = 0.001\n return (self.range * m2km) ** 2",
"def get_uniform_p_vals(self, min=1.5, max=9):\n rng = np.random.RandomState(12435)\n if self.power_activ == \"softplus\":\n p_vals = np.log(np.exp(rng.uniform(low=min, high=max, size=(self.num_units,))-1)-1)\n else:\n p_vals = np.sqrt(rng.uniform(low=min, high=max, size=(self.num_units,))-1)\n return p_vals",
"def resolution_range(self) -> Optional[float]:\n return self._get_property(RESOLUTION_RANGE_PROP, float)",
"def get_servo_pct(pi, pin):\n return pulsewidth2pct(pi.get_servo_pulsewidth(pin))",
"def test_returns_custom_range(self):\n img = np.arange(5)\n\n actual = util.apply_gamma(img, out_min=0, out_max=255, gamma=1)\n self.assertEqual(actual.min(), 0)\n self.assertEqual(actual.max(), 255)",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f'This car can go about {range} miles on a full charge.')",
"def _get_humidity_range(self):\n\n if not self._humidity_range:\n if not self.model_info:\n return None\n\n key = self._get_state_key(STATE_TARGET_HUM)\n range_info = self.model_info.value(key)\n if not range_info:\n min_hum = DEFAULT_MIN_HUM\n max_hum = DEFAULT_MAX_HUM\n else:\n min_hum = min(range_info.min, DEFAULT_MIN_HUM)\n max_hum = max(range_info.max, DEFAULT_MAX_HUM)\n self._humidity_range = [min_hum, max_hum]\n\n return self._humidity_range",
"def get_range(self):\r\n\t\tif self.battery_size == 70:\r\n\t\t\trange = 240\r\n\t\telif self.battery_size == 85:\r\n\t\t\trange = 270\r\n\t\t\t\r\n\t\tmessage = \"This car can go approx. \" + str(range)\r\n\t\tmessage += \" miles on a full charge.\"\r\n\t\tprint(message)",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n \n print(f\"This car can go about {range} miles on a full charge.\")",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge.\")",
"def read_gyro_range(self, raw = False):\n\t\traw_data = self.bus.read_byte_data(self.address, self.GYRO_CONFIG)\n\n\t\tif raw is True:\n\t\t\treturn raw_data\n\t\telif raw is False:\n\t\t\tif raw_data == self.GYRO_RANGE_250DEG:\n\t\t\t\treturn 250\n\t\t\telif raw_data == self.GYRO_RANGE_500DEG:\n\t\t\t\treturn 500\n\t\t\telif raw_data == self.GYRO_RANGE_1000DEG:\n\t\t\t\treturn 1000\n\t\t\telif raw_data == self.GYRO_RANGE_2000DEG:\n\t\t\t\treturn 2000\n\t\t\telse:\n\t\t\t\treturn -1",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go to about {range} miles on a full charge.\")",
"def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2",
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge\")",
"def read_gyro_range(self, raw = False):\n raw_data = self.bus.read_byte_data(self.address, self.GYRO_CONFIG)\n\n if raw is True:\n return raw_data\n elif raw is False:\n if raw_data == self.GYRO_RANGE_250DEG:\n return 250\n elif raw_data == self.GYRO_RANGE_500DEG:\n return 500\n elif raw_data == self.GYRO_RANGE_1000DEG:\n return 1000\n elif raw_data == self.GYRO_RANGE_2000DEG:\n return 2000\n else:\n return -1",
"def max_pwm(self):\r\n return self._max_pwm",
"def get_range(self):\n if self.battery_size == 40:\n range = 150\n elif self.battery_size == 65:\n range = 225\n print(f\"This car can go about {range} miles on a full charge.\")",
"def read_from_gpio(self):\n # if (use_static):\n # if (self.device_id in static_values.keys()):\n # return static_values[self.device_id]\n # else:\n # return 1;\n \n # if (random.random()<0.2):\n # if (random.random()<0.5):\n # self._decrease_position_index()\n # else:\n # self._increase_position_index()\n # self._position_index = (self._position_index+1)%len(FakePositionDevice._values)\n\n # self.position = FakePositionDevice._values[self._position_index]\n mu = self._value\n sig = 1\n self._value = np.random.normal(mu,sig,1)[0]\n if self._value < self._min_value:\n self._value = self._min_value\n if self._value > self._max_value:\n self._value = self._max_value\n \n return self._value\n return 4\n # return FakePositionDevice._values[self._position_index]",
"def get_refrange(self):\n if np.all(np.isnan(self.par)):\n print( 'Run params() before')\n return\n if hasattr(self,'refranges'):\n return self.refranges\n ice_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[1,r,10,0])]\n liq_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[0,r,10,0])]\n return (liq_r,ice_r)",
"def getRangeMM(self) -> float:\n ...",
"def gpio_read_analogue(self, pin: int) -> float:\n return randint(0, 500) / 100"
]
| [
"0.8013507",
"0.67188954",
"0.5904552",
"0.59033704",
"0.5854003",
"0.58518046",
"0.58146846",
"0.5794788",
"0.57866156",
"0.5736015",
"0.5717286",
"0.56514156",
"0.5626798",
"0.5618219",
"0.55474925",
"0.5529095",
"0.5525325",
"0.5523527",
"0.55130154",
"0.55095696",
"0.55055356",
"0.5495459",
"0.549427",
"0.549075",
"0.548251",
"0.547582",
"0.54703045",
"0.54108036",
"0.53963315",
"0.53871423"
]
| 0.8185473 | 0 |
Set the frequency (in Hz) of the PWM to be used on the gpio. Returns the numerically closest frequency if OK, otherwise PI_BAD_USER_GPIO or PI_NOT_PERMITTED. | def set_PWM_frequency(user_gpio, frequency):
return _u2i(_pigpio_command(_control, _PI_CMD_PFS, user_gpio, frequency)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_PWM_frequency(user_gpio):\n return _u2i(_pigpio_command(_control, _PI_CMD_PFG, user_gpio, 0))",
"def set_pwm_freq(self, servo_frequency: int):\n prescaleval = float(self._frequency)\n prescaleval /= float(self._resolution)\n prescaleval /= float(servo_frequency)\n prescaleval -= 1\n logger.info('Setting PWM frequency to %d Hz', servo_frequency)\n logger.info('Estimated pre-scale: %f', prescaleval)\n prescale = int(math.floor(prescaleval + 0.5))\n logger.info('Final pre-scale: %d', prescale)\n oldmode = self._device.readU8(MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self._device.write8(MODE1, newmode) # go to sleep\n self._device.write8(PRESCALE, prescale)\n self._device.write8(MODE1, oldmode)\n time.sleep(0.005)\n self._device.write8(MODE1, oldmode | 0x80)",
"def pwm_freq(self):\r\n return self._pwm_freq",
"def pwm_freq(self, freq: int):\r\n self._pwm_freq = freq\r\n self.pwm_freq_hist.append(freq)\r\n\r\n if self.pwm_freq_hist[-2] != freq and self._daq:\r\n msg = Message(\"pwm_freq\", freq, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)",
"def set_pwm_freq(self, freq_hz):\n prescaleval = 25000000.0 # 25MHz\n prescaleval /= 4096.0 # 12-bit\n prescaleval /= float(freq_hz)\n prescaleval -= 1.0\n prescale = int(math.floor(prescaleval + 0.5))\n oldmode = self.i2cBus.read_byte_data(self.address, MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self.i2cBus.write_byte_data(self.address, MODE1, newmode) # go to sleep\n self.i2cBus.write_byte_data(self.address, PRESCALE, prescale)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode)\n time.sleep(0.005)\n self.i2cBus.write_byte_data(self.address, MODE1, oldmode | 0x80)",
"def set_frequency(self):\r\n def move_synth(delta_f_synth):\r\n sign_delta_f_synth = int(delta_f_synth/abs(delta_f_synth))\r\n stepsize_Hz = int(10)\r\n num_steps = int(abs(delta_f_synth)/stepsize_Hz)\r\n remainder_Hz = round(abs(delta_f_synth)%stepsize_Hz,1)\r\n self.synth.set_incr(stepsize_Hz, 'Hz')\r\n for nn in range(num_steps): # slowly move the synth by delta_f_synth in stepsize steps\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n self.synth.set_incr(remainder_Hz, 'Hz')\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n \r\n def get_delta_f_synth():\r\n #get latest f_rep,f_0\r\n self.get_frequency() \r\n #calculate required f_rep to get desired PA_freq. switches n and frep in above eq.\r\n f_rep_goal = (self.setfrequency - self.sign_lock * self.f_lock - self.sign_0 * self.f_0) / self.n\r\n # print 'f_rep_goal = %.0f Hz'%f_rep_goal\r\n # lock uses 3rd harmonic so synth must be set to *3\r\n delta_f_synth = (f_rep_goal - self.f_rep)*3 \r\n delta_f_synth = round(delta_f_synth,1)\r\n # print 'delta_f_synth = %.1f Hz'%delta_f_synth\r\n return delta_f_synth\r\n \r\n iteration = 0\r\n delta_f_synth = get_delta_f_synth()\r\n while abs(delta_f_synth) > self.synth_tol:\r\n move_synth(delta_f_synth)\r\n delta_f_synth = get_delta_f_synth()\r\n iteration += 1\r\n if iteration > self.max_iteration:\r\n # print 'REACHED MAX ITERATION: delta_f_synth = %.1f'%delta_f_synth\r\n break",
"def set_frequency(self):\n def f():\n freq = float(self.freq_edit.get())\n duty = float(self.duty_edit.get())\n if duty == 0:\n duty = 1\n if duty > 1:\n duty = duty / 100\n self.parent.update_frequency(freq, duty, self.model.upper())\n return f",
"def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))",
"def set_frequency(value):\n # user input, minimum, maximum\n checklist = [int(value), 300000000, 928000000]\n\n if SendSignal.__verify_range(*checklist):\n SendSignal.__SIGNAL_SETTINGS['frequency'] = int(value)\n SendSignal.__SIGNAL_OBJ.setFreq(int(value))\n else:\n sys.stdout.write(\"Error {} not between {} and {}\".format(*checklist))\n sys.exit(2)",
"def change_frequency(self):\n if not self.ftext.text():\n return\n frequency = float(self.ftext.text())\n if frequency > 6.0:\n frequency = 6.0\n self.qbpm.change_frequency(frequency)\n self.ftext.setText(str(self.qbpm.frequency))",
"def set_frequency(self, pin, frequency):\n raise NotImplementedError",
"def freq(self, freq=None):\n if freq is not None:\n self.cmd(':AC:SETB:FREQ %0.2f\\n' % freq)\n self.freq_param = freq\n\n return freq",
"def set_frequency(self, f=1e9):\r\n return self._api.set_frequency(f)",
"def set_frequency(self, newval):\n rest_val = str(int(round(newval * 65536.0, 1)))\n return self._setAttr(\"frequency\", rest_val)",
"def frequency(self, freq):\n self.set_frequency(f'{freq}' if self._is_min_max(freq) else f'{freq}HZ')",
"def set_frequency(self, f=1e9):\r\n self.write('FREQ '+str(f))",
"def set_frequency(self):\n\t\t\"\"\"For Frequency Prescalar-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC0, PCA9530_2C_1_PSC0_USERDEFINED)\n\t\t\n\t\t\"\"\"For Frequency Prescalar-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PSC1, PCA9530_2C_1_PSC1_USERDEFINED)",
"def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))",
"def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))",
"def set_freq_hz(self, freq=None):\n if freq is None:\n freq = 1000000 * self.def_freq\n self.instr.write('F1 ' + str(freq) + ' H')\n time.sleep(self.sleep_time)",
"def set_frequency_exact(self, frequency):\n if (type(frequency) not in [int, float]):\n raise TypeError(\n \"ERROR: Invalid Input type. Frequency can only be a float value\")\n\n if (frequency < self.FREQ_MIN):\n self._frequency = self.FREQ_MIN\n\n if (frequency > self.FREQ_MAX):\n self._frequency = self.FREQ_MAX\n\n self._frequency = float(frequency)\n\n # Choose the appropriate frequency range based on the passed value.\n for i in range(len(self.FREQ_RANGE)):\n if (frequency <= self.FREQ_RANGE[i][0] and frequency >= self.FREQ_RANGE[i][0]):\n self._frequency_range = self.FREQ_RANGE[i]\n break\n\n if self._frequency != 0:\n self._time_period = float(1) / self._frequency\n\n else:\n self._time_period = float(\"inf\")\n\n self._sampling_time_interval = self._time_period / 500.0\n # Much greater than the nyquist rate for an accurate output with the\n # least deviation.",
"def set_frequency(self, f=1e9):\r\n self.f = f",
"def set_freq(self, freq):\n\n return self._service.exposed_set_freq(freq)",
"def set_sg_freq():\n freq = request.params.get(\"freq\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenFreq(output, ctypes.c_float(freq))\n if retval != 0:\n LOG.error(\"Failed to set signal generator frequency. Error code: %s\", ERROR_CODES[retval])",
"def set_pwm(self, fan, pct):\n Logger.debug(\"Set pwm %d to %d\" % (int(fan.source.name), pct))\n fan.source.write(pct)\n self.last_fan_speed = pct",
"def get_ao_manual_control_freq( channel ):\n freq = float64(0)\n CALL('GetPhysicalChanAOManualControlFreq', channel, byref(freq))\n return freq.value",
"def setfrequency(self, value):\n self.instrument.write('FREQ {0}'.format(value))",
"def set_frequency(self, percent_range):\n if (type(percent_range) not in [int, float]):\n raise TypeError(\"ERROR: Only int or float values can be passed.\")\n\n if (percent_range < 0 or percent_range > 100):\n raise ValueError(\n \"ERROR: Only values between 1 to 100 can be passed\")\n\n self._frequency = float(self._frequency_range[\n 0] + (self._frequency_range[1] - self._frequency_range[0]) * percent_range)\n\n if self._frequency != 0:\n self._time_period = float(1) / self._frequency\n else:\n self._time_period = float(\"inf\")\n\n self._sampling_time_interval = self._time_period / 500.0",
"def set_frequency(self, frequency):\n\n if frequency == 1:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 2:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 3:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n if frequency == 4:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def frequency_watt(self, p_req = 0, p_prev = 0, ts=datetime.utcnow(), location=0, db_UF = 0.05, db_OF = 0.05): #datetime.\n f = self.grid.get_frequency(ts,location)\n \n if (f < 60 - db_UF).any():\n p_mod = 0\n elif (f > 60 + db_OF).any():\n p_mod = p_req\n else:\n p_mod = p_prev\n \n return p_mod"
]
| [
"0.733027",
"0.6947587",
"0.66606504",
"0.65479803",
"0.6421843",
"0.6393472",
"0.6266007",
"0.6158921",
"0.61287534",
"0.61092573",
"0.60931885",
"0.6061429",
"0.6047837",
"0.59890205",
"0.5946561",
"0.591042",
"0.5868069",
"0.58495045",
"0.58495045",
"0.5811198",
"0.5799517",
"0.57992923",
"0.5783894",
"0.5779903",
"0.57652503",
"0.5742873",
"0.5687896",
"0.56853193",
"0.56588244",
"0.5635464"
]
| 0.78752476 | 0 |